blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
80aae07c6ab4e34782a351c4c412129086bfe652
|
531c47c15b97cbcb263ec86821d7f258c81c0aaf
|
/sdk/labservices/azure-mgmt-labservices/azure/mgmt/labservices/models/environment_setting_fragment_py3.py
|
c7d3c076bda1a16b402b7d682886230622ce5461
|
[
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] |
permissive
|
YijunXieMS/azure-sdk-for-python
|
be364d3b88204fd3c7d223df23756386ff7a3361
|
f779de8e53dbec033f98f976284e6d9491fd60b3
|
refs/heads/master
| 2021-07-15T18:06:28.748507
| 2020-09-04T15:48:52
| 2020-09-04T15:48:52
| 205,457,088
| 1
| 2
|
MIT
| 2020-06-16T16:38:15
| 2019-08-30T21:08:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,465
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource_py3 import Resource
class EnvironmentSettingFragment(Resource):
    """Represents settings of an environment, from which environment instances
    would be created.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The identifier of the resource.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The type of the resource.
    :vartype type: str
    :param location: The location of the resource.
    :type location: str
    :param tags: The tags of the resource.
    :type tags: dict[str, str]
    :param configuration_state: Describes the user's progress in configuring
     their environment setting. Possible values include: 'NotApplicable',
     'Completed'
    :type configuration_state: str or
     ~azure.mgmt.labservices.models.ConfigurationState
    :param description: Describes the environment and its resource settings
    :type description: str
    :param title: Brief title describing the environment and its resource
     settings
    :type title: str
    :param resource_settings: The resource specific settings
    :type resource_settings:
     ~azure.mgmt.labservices.models.ResourceSettingsFragment
    :param provisioning_state: The provisioning status of the resource.
    :type provisioning_state: str
    :param unique_identifier: The unique immutable identifier of a resource
     (Guid).
    :type unique_identifier: str
    """

    # id/name/type are server-populated (see the :ivar: notes above), so they
    # are marked readonly and excluded from outgoing requests.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
    }

    # Maps each Python attribute to its wire name and serialized type; the
    # 'properties.*' keys are nested under a "properties" object on the wire.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'configuration_state': {'key': 'properties.configurationState', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'title': {'key': 'properties.title', 'type': 'str'},
        'resource_settings': {'key': 'properties.resourceSettings', 'type': 'ResourceSettingsFragment'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'unique_identifier': {'key': 'properties.uniqueIdentifier', 'type': 'str'},
    }

    def __init__(self, *, location: str=None, tags=None, configuration_state=None, description: str=None, title: str=None, resource_settings=None, provisioning_state: str=None, unique_identifier: str=None, **kwargs) -> None:
        # Resource handles the common id/name/type/location/tags fields.
        super(EnvironmentSettingFragment, self).__init__(location=location, tags=tags, **kwargs)
        self.configuration_state = configuration_state
        self.description = description
        self.title = title
        self.resource_settings = resource_settings
        self.provisioning_state = provisioning_state
        self.unique_identifier = unique_identifier
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
c15dbfada0d946792fde27d9be3ce0e0f63b7e15
|
365913bcc02bfdf6b6f6c246855144663f7e052b
|
/rdkit/ML/Descriptors/UnitTestDescriptors.py
|
017f4b2d6f0f9e8f9534429c526d03e0308d7d58
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-3-Clause"
] |
permissive
|
UnixJunkie/rdkit
|
d8458eadca78ba1714be5c55ba75c8e164fc1479
|
3ddb54aeef0666aeaa2200d2137884ec05cb6451
|
refs/heads/master
| 2021-06-01T22:26:53.201525
| 2017-08-15T17:00:30
| 2017-08-15T17:00:30
| 100,572,461
| 2
| 0
|
NOASSERTION
| 2019-05-29T00:58:25
| 2017-08-17T07:03:53
|
C++
|
UTF-8
|
Python
| false
| false
| 1,659
|
py
|
#
# Copyright (C) 2001,2002 greg Landrum and Rational Discovery LLC
#
""" unit testing code for descriptors
"""
import unittest
from rdkit.ML.Descriptors import CompoundDescriptors
from rdkit.TestRunner import redirect_stdout
from rdkit.six import StringIO
class TestCase(unittest.TestCase):
def setUp(self):
d = [('DED', ['NonZero', 'Mean', 'Dev']), ('M_B_electroneg', ['NonZero']),
('Cov_rad', ['Max', 'Min'])]
self.desc = CompoundDescriptors.CompoundDescriptorCalculator(d)
self.desc.BuildAtomDict()
self.tol = 0.0001
def testAtomDict(self):
# " testing the atom dict "
assert len(self.desc.atomDict.keys()) == 48, 'BuildAtomDict failed'
def testSimpleDescriptorCalc(self):
# " testing simple descriptor calculation "
composList = ['Nb', 'Nb3', 'NbPt', 'Nb2Pt']
compare = [[2.32224798203, 0.0, 1.34000003338, 1.34000003338],
[2.32224798203, 0.0, 1.34000003338, 1.34000003338],
[1.51555249095, 0.806695491076, 1.34000003338, 1.29999995232],
[1.78445098797, 0.717062658734, 1.34000003338, 1.29999995232]]
for i in range(len(composList)):
self.assertTrue(
max(
map(lambda x, y: abs(x - y), compare[i], self.desc.CalcSimpleDescriptorsForComposition(
composList[i]))) < self.tol, 'Descriptor calculation failed')
names = self.desc.GetDescriptorNames()
self.assertEqual(len(names), 4)
self.assertIn('MEAN_DED', names)
def test_exampleCode(self):
f = StringIO()
with redirect_stdout(f):
CompoundDescriptors._exampleCode()
# Allow running this test module directly from the command line.
if __name__ == '__main__':  # pragma: nocover
    unittest.main()
|
[
"greg.landrum@gmail.com"
] |
greg.landrum@gmail.com
|
02870c37ceb356dfab187936f715d4a9e2a2bda0
|
75259be56c9b895970448a9e275405518cadf324
|
/src/cargos/sugar_beet.py
|
2b448e34f99c70c848987ffd39a9de6b2b283727
|
[] |
no_license
|
Azusa257/firs
|
5df946dea785515ef5303fd5ae7219bb222b9bb1
|
e824040c168c2863420d558bac64f8f01efc3e17
|
refs/heads/master
| 2023-06-10T09:36:48.358213
| 2021-06-06T20:34:16
| 2021-06-06T20:34:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
from cargo import Cargo

# Declarative cargo definition for sugar beet, passed straight through to the
# project's Cargo template.
# NOTE(review): the meaning/units of each field (weight, price_factor,
# capacity_multiplier, icon_indices) are defined by the cargo framework, not
# here — confirm against the Cargo class before changing values.
cargo = Cargo(
    id="sugar_beet",
    type_name="string(STR_CARGO_NAME_SUGAR_BEET)",
    unit_name="string(STR_CARGO_NAME_SUGAR_BEET)",
    type_abbreviation="string(STR_CID_SUGAR_BEET)",
    sprite="NEW_CARGO_SPRITE",
    weight="1.0",
    is_freight="1",
    # Bulk cargo: carried by hopper-style vehicles.
    cargo_classes="bitmask(CC_BULK)",
    cargo_label="SGBT",
    town_growth_effect="TOWNGROWTH_NONE",
    town_growth_multiplier="1.0",
    units_of_cargo="TTD_STR_TONS",
    items_of_cargo="string(STR_CARGO_UNIT_SUGAR_BEET)",
    penalty_lowerbound="5",
    single_penalty_length="30",
    price_factor=99,
    capacity_multiplier="1",
    icon_indices=(14, 1),
)
|
[
"andy@teamrubber.com"
] |
andy@teamrubber.com
|
9c026afa2692b1cfc3164dd2babc3391c3cf8218
|
4c0e871eb19d6ca5b8c550b60c4e3aa628ec729e
|
/Python记录/LeetCode/2连续字符串.py
|
264a62db73f8021d2504a5ac8693311de18f56c3
|
[] |
no_license
|
west789/Document
|
d4e7df6ff79046bf2c66ea082582feb2e7b8c29c
|
9b4de781bd2a7ecc15342098366f0123e83f6191
|
refs/heads/master
| 2021-05-26T07:31:52.444264
| 2019-05-20T07:20:07
| 2019-05-20T07:20:07
| 127,928,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,166
|
py
|
import copy
def doMain(s):
    """Return the length of the longest substring of s without repeating characters.

    Sliding-window approach: grow a window of unique characters; on a repeat,
    record the window length and restart the window just past the previous
    occurrence of the repeated character.

    :type s: str
    :rtype: int
    """
    if not s:
        return 0
    longest_str = 1
    substr = ''
    for item in s:
        if item not in substr:
            substr += item
        else:
            # Repeat found: close out the current window, then shrink it to
            # start just after the previous occurrence of `item`.
            if len(substr) > longest_str:
                longest_str = len(substr)
            substr += item
            substr = substr[substr.index(item) + 1:]
    # The final window may itself be the longest one.
    if len(substr) > longest_str:
        longest_str = len(substr)
    return longest_str
if __name__ == '__main__':
    # Quick manual check with a sample string.
    sample = 'pwdfwke'
    doMain(sample)
|
[
"738758058@qq.com"
] |
738758058@qq.com
|
f6d813cfeac9e1006402839f02d6ac002649ebc2
|
195b8d12796872c05d539aa9283fc3f407b8d8b5
|
/tempest/tempest/common/ssh.py
|
be6fe273f439ff41d0279ecd115bd590412c5f0a
|
[
"Apache-2.0"
] |
permissive
|
rvbelapure/openstack-nova-sched
|
afaa5928da3a8430b64bc23aedb251bae0e7d3ef
|
325da0e08979d79b7470d7506ced1b4210e2b696
|
refs/heads/master
| 2021-01-17T05:28:44.474242
| 2013-04-20T21:18:35
| 2013-04-20T21:18:35
| 9,082,500
| 0
| 1
| null | 2021-09-07T08:33:18
| 2013-03-28T17:30:46
|
Python
|
UTF-8
|
Python
| false
| false
| 5,444
|
py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cStringIO import StringIO
import select
import socket
import time
import warnings
from tempest import exceptions
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
from paramiko import RSAKey
class Client(object):
    """Thin wrapper over paramiko for opening SSH connections and running
    one-off commands on a remote host (Python 2 era code: cStringIO,
    basestring)."""

    def __init__(self, host, username, password=None, timeout=300, pkey=None,
                 channel_timeout=10, look_for_keys=False, key_filename=None):
        self.host = host
        self.username = username
        self.password = password
        # A private key supplied as raw text is parsed into a paramiko RSAKey.
        if isinstance(pkey, basestring):
            pkey = RSAKey.from_private_key(StringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
        # timeout: seconds used both as the overall connect-retry budget and
        # as each attempt's socket timeout; channel_timeout: select() wait
        # while draining command output.
        self.timeout = int(timeout)
        self.channel_timeout = float(channel_timeout)
        self.buf_size = 1024

    def _get_ssh_connection(self):
        """Returns an ssh connection to the specified host."""
        _timeout = True
        ssh = paramiko.SSHClient()
        # Accept unknown host keys automatically.
        ssh.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        _start_time = time.time()
        # Keep retrying until connected or the time budget is exhausted.
        while not self._is_timed_out(self.timeout, _start_time):
            try:
                ssh.connect(self.host, username=self.username,
                            password=self.password,
                            look_for_keys=self.look_for_keys,
                            key_filename=self.key_filename,
                            timeout=self.timeout, pkey=self.pkey)
                _timeout = False
                break
            except socket.error:
                # Host not reachable yet; retry immediately.
                continue
            except paramiko.AuthenticationException:
                # Auth can fail while the remote side is still booting; back off.
                time.sleep(5)
                continue
        if _timeout:
            raise exceptions.SSHTimeout(host=self.host,
                                        user=self.username,
                                        password=self.password)
        return ssh

    def _is_timed_out(self, timeout, start_time):
        # Algebraically equivalent to: (time.time() - start_time) > timeout.
        return (time.time() - timeout) > start_time

    def connect_until_closed(self):
        """Connect to the server and wait until connection is lost."""
        try:
            ssh = self._get_ssh_connection()
            _transport = ssh.get_transport()
            _start_time = time.time()
            _timed_out = self._is_timed_out(self.timeout, _start_time)
            # Poll the transport until it drops or the timeout elapses.
            while _transport.is_active() and not _timed_out:
                time.sleep(5)
                _timed_out = self._is_timed_out(self.timeout, _start_time)
            ssh.close()
        except (EOFError, paramiko.AuthenticationException, socket.error):
            # Losing the connection is the expected way out of this method.
            return

    def exec_command(self, cmd):
        """
        Execute the specified command on the server.

        Note that this method is reading whole command outputs to memory, thus
        shouldn't be used for large outputs.

        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        channel = transport.open_session()
        channel.fileno()  # Register event pipe
        channel.exec_command(cmd)
        # No stdin will be sent to the remote command.
        channel.shutdown_write()
        out_data = []
        err_data = []
        select_params = [channel], [], [], self.channel_timeout
        while True:
            ready = select.select(*select_params)
            if not any(ready):
                # select() timed out with nothing readable at all.
                raise exceptions.TimeoutException(
                    "Command: '{0}' executed on host '{1}'.".format(
                        cmd, self.host))
            if not ready[0]:  # If there is nothing to read.
                continue
            out_chunk = err_chunk = None
            if channel.recv_ready():
                out_chunk = channel.recv(self.buf_size)
                out_data += out_chunk,
            if channel.recv_stderr_ready():
                err_chunk = channel.recv_stderr(self.buf_size)
                err_data += err_chunk,
            # Done once the channel is closed and both streams are drained.
            if channel.closed and not err_chunk and not out_chunk:
                break
        exit_status = channel.recv_exit_status()
        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(
                command=cmd, exit_status=exit_status,
                strerror=''.join(err_data))
        return ''.join(out_data)

    def test_connection_auth(self):
        """Returns true if ssh can connect to server."""
        try:
            connection = self._get_ssh_connection()
            connection.close()
        except paramiko.AuthenticationException:
            return False
        return True
|
[
"owlpostarun@gmail.com"
] |
owlpostarun@gmail.com
|
7f1dedeaf91d770dd0311fae73c3bbe5539079c9
|
3e24611b7315b5ad588b2128570f1341b9c968e8
|
/Template/Graph2tex.py
|
451adf31f5883e688507bbbf8b1b66755fe1027d
|
[
"BSD-2-Clause"
] |
permissive
|
bioCKO/lpp_Script
|
dc327be88c7d12243e25557f7da68d963917aa90
|
0cb2eedb48d4afa25abc2ed7231eb1fdd9baecc2
|
refs/heads/master
| 2022-02-27T12:35:05.979231
| 2019-08-27T05:56:33
| 2019-08-27T05:56:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,036
|
py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<>
Purpose:
Created: 2017/5/9
"""
import re,os
from optparse import OptionParser
if __name__ == "__main__":
    # Wrap an image path into a standalone LaTeX figure environment.
    usage = '''usage: python2.7 %prog [options]
'''
    parser = OptionParser(usage=usage)
    parser.add_option("-i", "--Input", action="store",
                      dest="inputData",
                      help="Input Data")
    parser.add_option("-o", "--Output", action="store",
                      dest="Output",
                      help=" Output")
    parser.add_option("-c", "--Caption", action="store",
                      dest="Caption",
                      help=" Caption")
    (options, args) = parser.parse_args()
    inputData = os.path.abspath(options.inputData)
    Output = os.path.abspath(options.Output)
    Caption = options.Caption
    # Context manager guarantees the snippet is flushed and the handle closed
    # even on error (the original left the file object open).
    with open(Output, 'w') as OUTPUT:
        OUTPUT.write("""
\\begin{figure}[H]
\\centering
\\includegraphics[width=0.8\\textwidth]{%s}
\\captionsetup{labelsep=period}
\\caption{%s}
\\end{figure}
""" % (inputData, Caption))
|
[
"409511038@qq.com"
] |
409511038@qq.com
|
187eea89a649f18b50a7c8997ccffecd3bbf6cdd
|
684329a9a5d49a444f6f9e0a832db4aca4baef2c
|
/mvp/newsletter/admin.py
|
6b21e204de9a31de2c3c5dae9903408d27980f8e
|
[] |
no_license
|
theparadoxer02/Shipping-Monk
|
f12735c809fadac5a5e462fd762559fca7d45986
|
0f0548cf85ff05ee4bfe65ccf0b739e0ad340bc9
|
refs/heads/master
| 2021-01-19T21:32:31.696389
| 2017-02-20T03:42:14
| 2017-02-20T03:42:14
| 82,513,484
| 0
| 1
| null | 2017-02-20T03:37:52
| 2017-02-20T03:37:52
| null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from django.contrib import admin
# Register your models here.
from .forms import SignUpForm
from .models import SignUp
class SignUpAdmin(admin.ModelAdmin):
    # Columns shown on the admin changelist for newsletter signups.
    list_display = ["__str__", "timestamp", "updated"]
    # Reuse the public signup form for add/edit pages in the admin.
    form = SignUpForm


# Make SignUp manageable through the Django admin with the options above.
admin.site.register(SignUp, SignUpAdmin)
|
[
"rohit.yadav848@yahoo.com"
] |
rohit.yadav848@yahoo.com
|
7e33503d2dada68f3026bb0368169aee76d50f17
|
8b9b46bef13f2a562ce976f791ef30472b0e4652
|
/2020-04/4-08/19删除链表的倒数第N个节点/19.py
|
640fb5a14b7ef56a4a8f025b6677d86925d5dd60
|
[] |
no_license
|
Annihilation7/Leetcode-Love
|
0d1db2776b79f4c65fd2781b2d0031d1efd1ef14
|
3fa96c81f92595cf076ad675ba332e2b0eb0e071
|
refs/heads/master
| 2021-03-21T17:06:35.260644
| 2020-05-07T14:12:44
| 2020-05-07T14:12:44
| 247,314,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# -*- coding: utf-8 -*-
# Editor : Pycharm
# File : 19.py
# Author : ZhenyuMa
# Created : 2020/4/9 9:33 下午
# Description : 双指针的题
# Definition for singly-linked list.
class ListNode:
    """A singly-linked list node: a payload value plus a successor pointer."""

    def __init__(self, x):
        self.val = x
        self.next = None  # linked up by the caller
class Solution:
    """Remove the n-th node from the end of a singly linked list (two pointers)."""

    def removeNthFromEnd(self, head: ListNode, n: int) -> ListNode:
        # Advance `fast` n nodes ahead of `slow`; when `fast` hits the tail,
        # `slow` sits just before the node to remove.
        slow = head
        fast = head
        while n:
            try:
                fast = fast.next
            except Exception:
                # n exceeded the list length.
                print('invalid input.')
                return None
            n -= 1
        # fast is None exactly when the head itself must be removed.
        if fast is None:
            new_head = head.next
            head.next = None
            return new_head
        while fast.next:
            fast = fast.next
            slow = slow.next
        victim = slow.next
        slow.next = victim.next
        victim.next = None  # detach to help garbage collection
        return head
|
[
"763366463@qq.com"
] |
763366463@qq.com
|
71292a94bbd048605f8de67ed6a624d57f94b230
|
5167d0792b35d2214329d8e692734a1e058efba5
|
/Linked List/rearrange.py
|
2e07e6237d60229d2228eb9eaee46ced54102d30
|
[] |
no_license
|
mmenghnani/CS_learn
|
6dac86ede403fedad6ecfb69b05472b98e605e50
|
482f97ae5f2cb696ea82dd695d5b68b2aaf12742
|
refs/heads/master
| 2020-05-04T15:03:02.422491
| 2018-02-12T16:33:53
| 2018-02-12T16:33:53
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 618
|
py
|
def rearrange(head):
    """Reorder L0->L1->...->Ln in place into L0->Ln->L1->Ln-1->...

    Splits the list at its midpoint, reverses the second half with the
    module's `reverselist` helper, then weaves the two halves together.
    Returns None; the list is modified in place.

    The original version initialized `curr = None` and immediately assigned
    `curr.next`, so it crashed with AttributeError on every call; it also
    discarded `reverselist`'s return value.
    """
    if head is None or head.next is None:
        return
    # Find the midpoint: `slow` ends on the last node of the first half.
    slow = head
    fast = slow.next
    while fast and fast.next:
        slow = slow.next
        fast = fast.next.next
    head1 = head
    head2 = slow.next
    slow.next = None
    # NOTE(review): reverselist is defined elsewhere in this module. If it
    # returns the new head, use it; otherwise assume an in-place reversal.
    reversed_head = reverselist(head2)
    if reversed_head is not None:
        head2 = reversed_head
    # Weave the halves together, alternating one node from each.
    p1, p2 = head1, head2
    while p2:
        n1 = p1.next
        n2 = p2.next
        p1.next = p2
        if n1 is not None:
            p2.next = n1
        p1 = n1 if n1 is not None else p2
        p2 = n2
'''
Time Complexity of this solution is O(n).
'''
|
[
"sahilgaucho@gmail.com"
] |
sahilgaucho@gmail.com
|
29d0f5a4b0dcb2ba354cf97f7bcac67f166558ac
|
2a318f4c8372c75224b2d79106ef52d8f4375e71
|
/python/get_mailfolder.py
|
654694c9ec1369366db52a5e282ebd09ee8cedf9
|
[] |
no_license
|
keyur32/graph-snippets
|
0d4bacc66b5fb0bbfddb73695fa61a5538eaf038
|
e416d3ad86abdb30449325c06758e8cc6d73c137
|
refs/heads/master
| 2021-01-23T05:29:59.155567
| 2017-06-01T02:11:23
| 2017-06-01T02:11:23
| 92,971,791
| 1
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 205
|
py
|
import http.client

# Minimal Microsoft Graph snippet: GET a single mail folder for the signed-in
# user. NOTE(review): no Authorization header is set, and "%7Bid%7D" is the
# URL-encoded literal "{id}" placeholder — both must be filled in before this
# request can succeed against the real service.
conn = http.client.HTTPSConnection("graph.microsoft.com")
conn.request("GET", "/v1.0/me/mailFolders/%7Bid%7D")
res = conn.getresponse()
data = res.read()
# The response body is UTF-8 text; decode and dump it.
print(data.decode("utf-8"))
|
[
"keyur32@hotmail.com"
] |
keyur32@hotmail.com
|
6871bc58de9ec6556a53a23b30a28172dd055fad
|
9edd6cd0aac07bc3a433ec1a99b7922c4e8256ba
|
/marketing/migrations/0005_emailmarketing.py
|
6f6bc7c5c5efcc52d525a68e64f3975d02fb56e0
|
[] |
no_license
|
NahidAkhtar84/NShopping
|
87935f3119c918baed8d1ea3348c48028b686dfe
|
e466414853348a30bcb5e3096b847cc89a6c0976
|
refs/heads/master
| 2023-03-14T15:18:20.560228
| 2021-03-02T20:59:39
| 2021-03-02T20:59:39
| 343,904,888
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 664
|
py
|
# Generated by Django 3.1.5 on 2021-02-24 21:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: creates the emailMarketing table with an
    email plus created/updated timestamps."""

    # Must run after the previous marketing migration.
    dependencies = [
        ('marketing', '0004_auto_20210223_0718'),
    ]

    operations = [
        migrations.CreateModel(
            name='emailMarketing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email', models.EmailField(max_length=254)),
                # auto_now_add: set once on insert; auto_now: refreshed on save.
                ('timestamp', models.DateTimeField(auto_now_add=True)),
                ('updated', models.DateTimeField(auto_now=True)),
            ],
        ),
    ]
|
[
"nahid.ibne.akhtar@gmail.com"
] |
nahid.ibne.akhtar@gmail.com
|
aea490cd24449091a1368b875ec5fe09f42dc668
|
a1eb0bb73680bc42af97eea6b4d7811453dc6758
|
/SVM/venv/Scripts/easy_install-script.py
|
b246adbc41772d1275dc0105e83226f24307b6aa
|
[] |
no_license
|
PotatoPig/machine-learning
|
23c2ba5e7cf9d66c92309437d47d139bbf4e866f
|
eb7ae7b8bc03d765e508b1a1c222ea15d25b1c21
|
refs/heads/master
| 2020-07-22T15:00:03.607116
| 2019-09-09T06:47:33
| 2019-09-09T06:47:33
| 207,239,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
#!D:\CS_Project\MachineLearning\SVM\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Pin the setuptools distribution this generated entry point belongs to.
__requires__ = 'setuptools==39.1.0'
import re
import sys

from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Normalize argv[0]: strip a trailing '-script.py'/'-script.pyw'/'.exe'
    # so the program presents itself as plain 'easy_install'.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
|
[
"Binhan Xu"
] |
Binhan Xu
|
d16cfbe7414805ba8bbe1c033534772a2c15925c
|
b22588340d7925b614a735bbbde1b351ad657ffc
|
/athena/LArCalorimeter/LArMonTools/share/LArMonTools_jobOptions_withOddCells.py
|
fdeb466c1e4d98dc42670897d1a9cecf12eb5aea
|
[] |
no_license
|
rushioda/PIXELVALID_athena
|
90befe12042c1249cbb3655dde1428bb9b9a42ce
|
22df23187ef85e9c3120122c8375ea0e7d8ea440
|
refs/heads/master
| 2020-12-14T22:01:15.365949
| 2020-01-19T03:59:35
| 2020-01-19T03:59:35
| 234,836,993
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,208
|
py
|
# most of them should be initialized already in RecExCommission
ToolSvc = Service( "ToolSvc" )

# Allow the calling jobOptions to override the check frequency.
if not 'CheckEveryNoEvents' in dir():
    CheckEveryNoEvents=100

#----------------------------------------------------------
# Load the monitoring libraries and schedule the AthenaMon algorithm.
theApp.Dlls += [ "AthenaMonitoring"]
theApp.Dlls += [ "LArMonTools"]
theApp.TopAlg += ["AthenaMon/LArMon1"]
LArMon1 = Algorithm( "LArMon1" )
LArMon1.CheckEveryNoEvents=CheckEveryNoEvents

# include all monitoring tools
# Only the EventInfo tool is currently enabled; the others are kept commented
# out so they can be switched on selectively.
#include ("LArMonTools/LAr2DNoiseMonTool_jobOptions.py" )
#include ("LArMonTools/LArDigitNoiseMonTool_jobOptions.py" )
# include ("LArMonTools/LArDigitSimpleMonTool_jobOptions.py" )
#include ("LArMonTools/LArDigMonTool_jobOptions.py" )
#include ("LArMonTools/LArFEBMon_jobOptions.py" )
#include ("LArMonTools/LArRawChannelMonTool_jobOptions.py" )
#include ("LArMonTools/LArRawChannelNoiseMonTool_jobOptions.py" )
#include ("LArMonTools/LArScaNoiseMonTool_jobOptions.py" )
include ("LArMonTools/LArEventInfoMonTool_jobOptions.py" )
#include ("LArMonTools/LArAccumDigMonTool_jobOptions.py")
#include ("LArMonTools/LArFebNoiseMonTool_jobOptions.py")
#include ("LArMonTools/LArOddCellsMonTool_jobOptions.py")
#include ("LArMonTools/LArRoughCorrelMonTool_jobOptions.py")
|
[
"rushioda@lxplus754.cern.ch"
] |
rushioda@lxplus754.cern.ch
|
b38c4139c0650c0fe99411a7886d9897a6e474ed
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/10/usersdata/124/24764/submittedfiles/testes.py
|
cb319ab269e7c22443456c6988769cf5a8eb8080
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,158
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
def vabsol(x):
    """Return the absolute value of x.

    Kept for backward compatibility with existing callers; delegates to the
    builtin abs() instead of the hand-rolled sign test.
    """
    return abs(x)
def calculopi(y):
    """Approximate pi with y terms of the Nilakantha series:
    3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ...
    """
    approx = 3
    base = 2
    for term_index in range(y):
        term = 4 / (base * (base + 1) * (base + 2))
        if term_index % 2 == 0:
            approx = approx + term
        else:
            approx = approx - term
        base = base + 2
    return approx
def cos(z, epsilon):
    """Approximate cos(z) by its Maclaurin series.

    Terms z**(2k)/(2k)! are accumulated with alternating signs until the
    first term smaller than epsilon, which is then discarded.

    The original implementation referenced an undefined name `o` (NameError)
    and its `while True` loop never updated the term, so it could not
    terminate; this rewrite preserves the intended epsilon-based stopping rule.
    """
    cosz = 1.0
    term = 1.0
    k = 0
    while True:
        k += 1
        # Incremental update: previous term times z^2 / ((2k-1)(2k)).
        term = term * z * z / ((2 * k - 1) * (2 * k))
        if term < epsilon:
            break
        if k % 2:
            cosz -= term
        else:
            cosz += term
    return cosz
def razaurea(m, epsilon):
    """Approximate the golden ratio as 2*cos(pi/5), using calculopi(m) for pi
    and epsilon as the cosine series tolerance."""
    pi_approx = calculopi(m)
    return 2 * cos(pi_approx / 5, epsilon)
# Script entry: read the term count and tolerance, then print both estimates.
m = int(input('Digite o número m de termos da fórmula de pi: '))
# Cast to float: on Python 3, input() returns a string, and a str epsilon
# would break the numeric comparisons inside cos().
epsilon = float(input('Digite o epsilon para o cálculo da razão áurea: '))
m = vabsol(m)
print('Valor aproximado de pi: %.15f' %calculopi(m))
print('Valor aproximado da razão áurea: %.15f' %razaurea(m, epsilon))
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
78485c52167656823797fb21bb81f97525dfa5fa
|
d94f758b2a5120fab69f48c7888b232599a05602
|
/app/user/serializers.py
|
2b2c06b1279b9ee6248e204ffe9d87b5bf248944
|
[
"MIT"
] |
permissive
|
zdravkob98/recipe-app-api
|
51b74d961b13a0dec7ca31320d148bc804ae41a1
|
1aa236f69ee3960833219cabd6c9293d6d0f2ba4
|
refs/heads/main
| 2023-02-16T00:29:15.110453
| 2021-01-13T17:33:14
| 2021-01-13T17:33:14
| 328,614,779
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,730
|
py
|
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
    """serializer for the users object"""

    class Meta:
        # Bind to the project's active user model rather than a hard-coded one.
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # Never echo the password back; enforce a minimum length on input.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        """Create a new user with encrypted password and return it"""
        return get_user_model().objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Update a user, setting the password correctly and return it"""
        # Pull the password out so super().update() does not store it raw.
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if password:
            # set_password hashes the value; plain assignment would not.
            user.set_password(password)
            user.save()
        return user
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication object"""

    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        # Leading/trailing spaces may be part of a valid password.
        trim_whitespace=False
    )

    def validate(self, attrs):
        """Validate and authenticate the user"""
        email = attrs.get('email')
        password = attrs.get('password')
        # authenticate() takes a `username` keyword even though this project
        # signs in with the email address.
        user = authenticate(
            request=self.context.get('request'),
            username=email,
            password=password
        )
        if not user:
            msg = _('Unable to authenticate with provided credentials')
            raise serializers.ValidationError(msg, code='authorization')
        # Attach the resolved user so the view can issue a token for it.
        attrs['user'] = user
        return attrs
|
[
"zdravkobonev@abv.bg"
] |
zdravkobonev@abv.bg
|
96f9490be55b387dd0ae99a0ba8576011052572e
|
8db94de54b604475fa874c9f2c22c07aeb73e57a
|
/singular-value-decomposition/data/cleaner.py
|
0bc381051da4a28aa45e54677873ad705130e737
|
[] |
no_license
|
silky/programmers-introduction-to-mathematics
|
5a13d533f2efc24eb55d37d2a908cf7095d27858
|
41e432012f5a6163db5bb2d77b8cd094877927b6
|
refs/heads/master
| 2020-04-03T04:31:33.992744
| 2018-10-27T22:27:07
| 2018-10-27T22:27:07
| 155,016,472
| 1
| 0
| null | 2018-10-27T22:47:38
| 2018-10-27T22:47:38
| null |
UTF-8
|
Python
| false
| false
| 2,161
|
py
|
import json
import os
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer
from nltk import pos_tag, word_tokenize
def loadRaw(directory):
    """Read every *txt file in `directory` into a dict of filename -> contents."""
    documents = {}
    for entry in os.listdir(directory):
        if entry.endswith('txt'):
            with open(os.path.join(directory, entry), 'r') as handle:
                documents[entry] = handle.read()
    return documents
allWords = None


def words():
    """Return the one-grams vocabulary as a set, caching the raw list in the
    allWords global."""
    global allWords
    here = os.path.dirname(__file__)
    with open(os.path.join(here, 'one-grams.txt'), 'r') as vocab_file:
        allWords = [entry.strip() for entry in vocab_file]
    return set(allWords)
# Extract a list of tokens from a cleaned string.
def tokenize(s):
    """Lower-cased tokens of s that are known words, are not English
    stopwords, and are at least 3 characters long."""
    keep = words() - set(stopwords.words('english'))
    return [token.lower() for token in word_tokenize(s)
            if token in keep and len(token) >= 3]
def wordnetPos(tag):
    """Map a Penn Treebank POS tag prefix to the WordNet POS constant,
    defaulting to noun for anything unrecognized."""
    prefix_to_pos = {
        'J': wordnet.ADJ,
        'V': wordnet.VERB,
        'N': wordnet.NOUN,
        'R': wordnet.ADV,
    }
    return prefix_to_pos.get(tag[:1], wordnet.NOUN)
def process(output_filename="all_stories.json"):
    """Tokenize, POS-tag and lemmatize every story, then dump them to JSON.

    Reads all .txt files from ./cnn-stories (relative to this file), cleans
    each one, and writes a JSON list of {filename, text, words} records to
    output_filename in the same directory.
    """
    print("Loading...")
    dirname = os.path.dirname(__file__)
    documentDict = loadRaw(os.path.join(dirname, 'cnn-stories'))
    documents = []
    print("Cleaning...")
    # The lemmatizer is reusable: build it once instead of once per document
    # (the original constructed a WordNetLemmatizer inside the loop).
    wnl = WordNetLemmatizer()
    for i, (filename, documentText) in enumerate(documentDict.items()):
        tokens = tokenize(documentText)
        tagged_tokens = pos_tag(tokens)
        stemmedTokens = [wnl.lemmatize(word, wordnetPos(tag)).lower()
                         for word, tag in tagged_tokens]
        documents.append({
            'filename': filename,
            'text': documentText,
            'words': stemmedTokens,
        })
        # Progress marker every 100 documents.
        if i % 100 == 0:
            print(i)
    print("Writing to disk...")
    with open(os.path.join(dirname, output_filename), 'w') as outfile:
        outfile.write(json.dumps(documents))
    print("Done!")
if __name__ == "__main__":
    # Run the full clean-and-dump pipeline with the default output name.
    process()
|
[
"j2kun@users.noreply.github.com"
] |
j2kun@users.noreply.github.com
|
666718bff6602e071ff4eec4558e2b234c5ebacb
|
28d971fe35e5cf9d5446b712c1100dbd1f5236aa
|
/boxx/tool/toolIo.py
|
f6e1f25e364856fa657792da3c4b9d16d166250c
|
[] |
no_license
|
snoworld888/boxx
|
d396833167c0d020a2066296490ae17c718ae2ea
|
f494e265cc85790b3dc15aaa693055b7c783a932
|
refs/heads/master
| 2021-03-26T00:47:44.553795
| 2020-03-13T09:36:20
| 2020-03-13T09:36:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,365
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os, sys
import warnings
from functools import wraps
from ..ylsys import py2, sysi
class BoxxException(Exception):
    """Root exception type for boxx; package errors derive from this."""
class BoxxWarning(Warning):
    """Root warning category for boxx; package warnings derive from this."""
class OffScreenWarning(BoxxWarning):
    # NOTE(review): judging by the name, emitted when display/visualisation
    # runs off-screen — confirm at the call sites elsewhere in the package.
    pass
class Except():
    '''
    Capture the exception, traceback and frame raised inside a `with` block.

    >>> with excep:
    >>>     1/0
    >>> dira(excep)

    `deep` walks that many tb_next links before recording the frame. The
    exception itself still propagates out of the `with` statement
    (__exit__ returns a falsy value).
    '''

    def __init__(self, deep=0):
        self.deep = deep

    def __enter__(self):
        pass

    def __exit__(self, typee, value, traceback):
        deep = self.deep
        # Guard against traceback being None (clean exit) or shallower than
        # `deep`: the original crashed with AttributeError when the `with`
        # body raised nothing.
        while deep and traceback is not None:
            deep -= 1
            traceback = traceback.tb_next
        self.type = typee
        self.value = self.v = value
        self.traceback = self.t = traceback
        self.frame = self.f = traceback.tb_frame if traceback is not None else None


# Shared module-level instance for quick interactive use.
excep = Except()
def getExcept(fun):
    '''
    Run `fun()` and return (exception, traceback, frame) describing the failure.

    Returns (None, None, None) when fun() raises nothing. The original left
    `e` unbound in that case, so the final statement crashed with NameError.
    '''
    e = None
    try:
        exc = Except(1)
        with exc:
            fun()
    except Exception as ee:
        e = ee
    # getattr defaults keep this safe even if __exit__ recorded nothing.
    return e, getattr(exc, 'traceback', None), getattr(exc, 'frame', None)
def warn(msg, warnType=BoxxWarning, filename=None, line=None, module='boxx', blue=False):
    '''
    Emit a warning of type warnType; when filename/line are omitted they are
    taken from the caller's code object.
    '''
    header = ('\x1b[36m%s\x1b[0m' if blue else '%s') % 'warning from boxx'
    msg = '%s\n    %s' % (header, msg)
    if filename is None or line is None:
        caller_code = sys._getframe(1).f_code
        if filename is None:
            filename = caller_code.co_filename
        if line is None:
            line = caller_code.co_firstlineno
    warnings.warn_explicit(msg, warnType, filename, line, module)
warn1timeCache = {}


@wraps(warn)
def warn1time(msg, *l, **kv):
    '''
    Same as warn, but each distinct msg is only emitted once per process.
    '''
    if warn1timeCache.get(msg):
        return
    warn(msg, *l, **kv)
    warn1timeCache[msg] = True
getsize = os.path.getsize


def getsizem(path='.'):
    '''
    Size of `path` in megabytes; directories are summed recursively.
    '''
    if not os.path.isdir(path):
        return os.path.getsize(path) / float(1024 ** 2)
    children = (os.path.join(path, child) for child in os.listdir(path))
    return sum(getsizem(child) for child in children)
def fileJoinPath(_file_, path='.'):
    '''
    Absolute path of `path` interpreted relative to the directory that
    contains _file_ (typically a module's __file__).
    '''
    base_dir = os.path.dirname(os.path.abspath(_file_))
    return os.path.abspath(os.path.join(base_dir, path))
def filename(path):
    '''
    Base name of `path` with both the directory part and the extension
    (everything from the last dot) removed.
    '''
    base = os.path.basename(path)
    if '.' not in base:
        return base
    return base[:base.rindex('.')]
def relfile(relative_path):
'''
Return a absolute version of a relative_path relative the __file__
'''
frame = sys._getframe(1)
if '__file__' not in frame.f_globals:
return relative_path
_file_ = frame.f_globals['__file__']
abspath = os.path.abspath(os.path.join(_file_, '..', relative_path))
return abspath
def listdir(path=None):
path = path or '.'
return os.listdir(path)
def openread(path, encoding='utf-8'):
'''
返回path文件的文本内容
'''
if py2:
with open(path, 'r') as f:
return f.read()
with open(path, 'r', encoding=encoding) as f:
strr = f.read()
return strr
def openwrite(strr, path, mode='w', encoding='utf-8'):
'''
将strr写入path
'''
if py2:
with open(path, mode) as f:
f.write(strr)
return path
with open(path, mode, encoding=encoding) as f:
f.write(strr)
return path
def validFilename(filename, replaceBy='_'):
'''
return validate filename
'''
import re
if sysi.win:
rstr = r"[\/\\\:\*\?\"\<\>\|]" # '/ \ : * ? " < > |'
else:
rstr = r"[\/]" # ' / '
newName = re.sub(rstr, replaceBy, filename)
return newName
def first_exist_dir(*dirs):
"""Input dirs and return the first exist dir.
If none dir exist, return First
"""
if len(dirs) == 1 and isinstance(dirs[0], (list, tuple)):
dirs = dirs[0]
for dirr in dirs:
if os.path.isdir(dirr):
return dirr
return dirs[0]
def loadjson(path):
import json
with open(path, 'r') as f:
js = json.load(f)
return js
def savejson(obj, path, indent=None):
import json
import numpy as np
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return json.JSONEncoder.default(self, obj)
with open(path, 'w') as f:
json.dump(obj, f, indent=indent, cls=NumpyEncoder)
return path
def replaceTabInPy(dirr='.'):
'''
将所有tab换成4个空格
'''
from glob import glob
from .toolLog import log
pys = glob(os.path.join(dirr, '*.py'))
for py in pys:
code = openread(py)
log(py,code.count('\t'))
new = code.replace('\t',' '*4)
openwrite(new, py)
def saveData(data, name='pickle_of_boxx', log=False): #保存进度
'''
保存二进制数据
'''
import pickle
if log:
print('正在将数据写入',os.path.abspath('.'),'下的文件:“'+name+'”,请稍等。。。')
with open(name, "wb") as f:
pickle.dump(data,f)
if log:
print('\n文件“'+name+'”已保存在',os.path.abspath('.'),'目录下!')
def loadData(name='pickle_of_boxx', log=False): #载入数据
import pickle
if not os.path.isfile(name):
print('在',os.path.abspath('.'),'目录下,“'+name+'”文件不存在,操作失败!')
if log:
print('正在读取',os.path.abspath('.'),'目录下的文件:“'+name+'”\n请稍等。。。')
with open(name,"rb") as f:
data = pickle.load(f)
f.close()
if log:
print('文件:“'+name+'”读取成功!')
return data
def browserOpen(url):
'''
open url with browser
if can't open browser raise warn
'''
import webbrowser
if not webbrowser.open_new_tab(url):
from boxx import warn
warn('''can't open url with web browser, plaese open url:"%s" in your browser'''%url)
if __name__ == "__main__":
pass
|
[
"ylxx@live.com"
] |
ylxx@live.com
|
490db4e8d1d756d73832a1634cf6a28177fd6c25
|
711756b796d68035dc6a39060515200d1d37a274
|
/output_cog_tags/optimized_2040.py
|
8a363172a1b50d5cfa0a341f9e7897d38cb2722f
|
[] |
no_license
|
batxes/exocyst_scripts
|
8b109c279c93dd68c1d55ed64ad3cca93e3c95ca
|
a6c487d5053b9b67db22c59865e4ef2417e53030
|
refs/heads/master
| 2020-06-16T20:16:24.840725
| 2016-11-30T16:23:16
| 2016-11-30T16:23:16
| 75,075,164
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,580
|
py
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "Cog1_Anch" not in marker_sets:
s=new_marker_set('Cog1_Anch')
marker_sets["Cog1_Anch"]=s
s= marker_sets["Cog1_Anch"]
mark=s.place_marker((573.679, 413.934, 589.856), (0, 0, 1), 21.9005)
if "Cog2_GFPN" not in marker_sets:
s=new_marker_set('Cog2_GFPN')
marker_sets["Cog2_GFPN"]=s
s= marker_sets["Cog2_GFPN"]
mark=s.place_marker((510.524, 576.355, 443.039), (1, 0.5, 0), 21.9005)
if "Cog2_GFPC" not in marker_sets:
s=new_marker_set('Cog2_GFPC')
marker_sets["Cog2_GFPC"]=s
s= marker_sets["Cog2_GFPC"]
mark=s.place_marker((382.804, 624.377, 470.584), (1, 0.5, 0), 21.9005)
if "Cog2_Anch" not in marker_sets:
s=new_marker_set('Cog2_Anch')
marker_sets["Cog2_Anch"]=s
s= marker_sets["Cog2_Anch"]
mark=s.place_marker((403.834, 550.392, 767.711), (1, 0.5, 0), 21.9005)
if "Cog3_GFPN" not in marker_sets:
s=new_marker_set('Cog3_GFPN')
marker_sets["Cog3_GFPN"]=s
s= marker_sets["Cog3_GFPN"]
mark=s.place_marker((491.978, 565.805, 484.115), (1, 0.87, 0), 21.9005)
if "Cog3_GFPC" not in marker_sets:
s=new_marker_set('Cog3_GFPC')
marker_sets["Cog3_GFPC"]=s
s= marker_sets["Cog3_GFPC"]
mark=s.place_marker((514.203, 574.851, 469.244), (1, 0.87, 0), 21.9005)
if "Cog3_Anch" not in marker_sets:
s=new_marker_set('Cog3_Anch')
marker_sets["Cog3_Anch"]=s
s= marker_sets["Cog3_Anch"]
mark=s.place_marker((407.78, 393.952, 433.493), (1, 0.87, 0), 21.9005)
if "Cog4_GFPN" not in marker_sets:
s=new_marker_set('Cog4_GFPN')
marker_sets["Cog4_GFPN"]=s
s= marker_sets["Cog4_GFPN"]
mark=s.place_marker((397.715, 422.213, 633.49), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_GFPC" not in marker_sets:
s=new_marker_set('Cog4_GFPC')
marker_sets["Cog4_GFPC"]=s
s= marker_sets["Cog4_GFPC"]
mark=s.place_marker((299.134, 345.348, 540.342), (0.97, 0.51, 0.75), 21.9005)
if "Cog4_Anch" not in marker_sets:
s=new_marker_set('Cog4_Anch')
marker_sets["Cog4_Anch"]=s
s= marker_sets["Cog4_Anch"]
mark=s.place_marker((661.347, 715.119, 527.436), (0.97, 0.51, 0.75), 21.9005)
if "Cog5_GFPN" not in marker_sets:
s=new_marker_set('Cog5_GFPN')
marker_sets["Cog5_GFPN"]=s
s= marker_sets["Cog5_GFPN"]
mark=s.place_marker((487.987, 544.677, 575.148), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_GFPC" not in marker_sets:
s=new_marker_set('Cog5_GFPC')
marker_sets["Cog5_GFPC"]=s
s= marker_sets["Cog5_GFPC"]
mark=s.place_marker((469.098, 619.63, 444.515), (0.39, 0.31, 0.14), 21.9005)
if "Cog5_Anch" not in marker_sets:
s=new_marker_set('Cog5_Anch')
marker_sets["Cog5_Anch"]=s
s= marker_sets["Cog5_Anch"]
mark=s.place_marker((393.42, 571.365, 677.83), (0.39, 0.31, 0.14), 21.9005)
if "Cog6_GFPN" not in marker_sets:
s=new_marker_set('Cog6_GFPN')
marker_sets["Cog6_GFPN"]=s
s= marker_sets["Cog6_GFPN"]
mark=s.place_marker((473.712, 591.978, 490.691), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_GFPC" not in marker_sets:
s=new_marker_set('Cog6_GFPC')
marker_sets["Cog6_GFPC"]=s
s= marker_sets["Cog6_GFPC"]
mark=s.place_marker((538.76, 519.949, 518.606), (0.6, 0.31, 0.64), 21.9005)
if "Cog6_Anch" not in marker_sets:
s=new_marker_set('Cog6_Anch')
marker_sets["Cog6_Anch"]=s
s= marker_sets["Cog6_Anch"]
mark=s.place_marker((430.746, 442.112, 406.136), (0.6, 0.31, 0.64), 21.9005)
if "Cog7_GFPN" not in marker_sets:
s=new_marker_set('Cog7_GFPN')
marker_sets["Cog7_GFPN"]=s
s= marker_sets["Cog7_GFPN"]
mark=s.place_marker((539.245, 560.924, 539.566), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_GFPC" not in marker_sets:
s=new_marker_set('Cog7_GFPC')
marker_sets["Cog7_GFPC"]=s
s= marker_sets["Cog7_GFPC"]
mark=s.place_marker((445.16, 667.254, 480.14), (0.89, 0.1, 0.1), 21.9005)
if "Cog7_Anch" not in marker_sets:
s=new_marker_set('Cog7_Anch')
marker_sets["Cog7_Anch"]=s
s= marker_sets["Cog7_Anch"]
mark=s.place_marker((311.998, 611.308, 568.923), (0.89, 0.1, 0.1), 21.9005)
if "Cog8_GFPC" not in marker_sets:
s=new_marker_set('Cog8_GFPC')
marker_sets["Cog8_GFPC"]=s
s= marker_sets["Cog8_GFPC"]
mark=s.place_marker((499.633, 590.037, 527.988), (0.3, 0.69, 0.29), 21.9005)
if "Cog8_Anch" not in marker_sets:
s=new_marker_set('Cog8_Anch')
marker_sets["Cog8_Anch"]=s
s= marker_sets["Cog8_Anch"]
mark=s.place_marker((449.243, 637.42, 675.147), (0.3, 0.69, 0.29), 21.9005)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
[
"batxes@gmail.com"
] |
batxes@gmail.com
|
1377caae35112646cf15378fcb1642bc351c221b
|
d82b879f41e906589a0a6ad5a6a09e0a0032aa3f
|
/ObservationScripts/observe_source_rtcor.py
|
bf3940f7e3082d056e6821d91546c3ffa169c64c
|
[] |
no_license
|
SETIatHCRO/ATA-Utils
|
66718eed669882792148fe0b7a2f977cd0f6ac2e
|
59f4d21b086effaf41d5e11e338ce602c803cfd0
|
refs/heads/master
| 2023-08-16T20:41:44.233507
| 2023-08-10T20:39:13
| 2023-08-10T20:39:13
| 137,617,987
| 5
| 5
| null | 2023-08-10T20:39:14
| 2018-06-17T00:07:05
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,840
|
py
|
#!/home/obsuser/miniconda3/envs/ATAobs/bin/python
import atexit
from ATATools import ata_control, logger_defaults
from SNAPobs import snap_dada, snap_if
import numpy as np
import os,sys
import time
import argparse
import logging
import os
def main():
logger = logger_defaults.getProgramLogger("observe",
loglevel=logging.INFO)
# Define antennas
ant_list = ["1c", "1g", "1h", "1k", "1e", "2a", "2b", "2c",
"2e", "2h", "2j", "2k", "2l", "2m", "3c", "3d",
"3l", "4j", "5b", "4g"]
ata_control.reserve_antennas(ant_list)
atexit.register(ata_control.release_antennas, ant_list, False)
antlo_list = [ant+lo.upper() for ant in ant_list for lo in ['b','c']]
freqs = [3000]*len(ant_list)
freqs_c = [3000]*len(ant_list)
# set LO + focus frequencies
ata_control.set_freq(freqs, ant_list, lo='b', nofocus=True)
ata_control.set_freq(freqs_c, ant_list, lo='c')
time.sleep(30)
# Track source
source = "3c84"
ata_control.make_and_track_ephems(source, ant_list)
# autotune + IF tuning
ata_control.autotune(ant_list)
snap_if.tune_if_antslo(antlo_list)
print("Tuning complete")
#time.sleep(20)
xgpu_int_time = 6.5536 #seconds
obs_time = 600 #seconds
print("="*79)
print("Setting correlator integration time")
print("set_postproc_keys.py -s --prefix XTIMEINT=%f" %xgpu_int_time)
os.system("set_postproc_keys.py -s --prefix XTIMEINT=%f" %xgpu_int_time)
print("="*79)
print("Starting new obs")
print("start_record_in_x.py -H 1 2 3 4 5 6 7 8 -i 10 -n %i" %obs_time)
os.system("start_record_in_x.py -H 1 2 3 4 5 6 7 8 -i 10 -n %i" %obs_time)
print("Recording...")
time.sleep(obs_time+20)
print("="*79)
print("Obs completed")
if __name__ == "__main__":
main()
|
[
"wael.a.farah@gmail.com"
] |
wael.a.farah@gmail.com
|
68503476a63039e16c0973604a668ad6bdf2eec2
|
8d6f9a3d65a189d99eff10e30cfabb0b761b635f
|
/scripts/support_hypercube_measures.py
|
d9bcd6c5528f1e6e38aab8773754772429a2826d
|
[
"BSD-3-Clause"
] |
permissive
|
arita37/mystic
|
db2ebbed139b163e3e5df49c2325b3de35dd8cd0
|
3dcdd4627eb759672091859e8334be075bfd25a5
|
refs/heads/master
| 2021-01-22T20:19:22.569893
| 2016-08-20T15:52:46
| 2016-08-20T15:52:46
| 66,545,670
| 1
| 0
| null | 2016-08-25T09:42:31
| 2016-08-25T09:42:31
| null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
#!/usr/bin/env python
#
# Author: Mike McKerns (mmckerns @caltech and @uqfoundation)
# Copyright (c) 1997-2016 California Institute of Technology.
# License: 3-clause BSD. The full license text is available at:
# - http://trac.mystic.cacr.caltech.edu/project/mystic/browser/mystic/LICENSE
from mystic.support import hypercube_measures
__doc__ = hypercube_measures.__doc__
if __name__ == '__main__':
import sys
hypercube_measures(sys.argv[1:])
# EOF
|
[
"mmckerns@968178ea-60bd-409e-af13-df8a517b6005"
] |
mmckerns@968178ea-60bd-409e-af13-df8a517b6005
|
8e645a2eefb43001aacb88d512c374af92c2427d
|
df690ac0484ff04cb63f71f528a9d0a0e557d6a3
|
/.history/ws_20210608101643.py
|
7b6d31a7d4f3a30447282a1e853cf3f05e3824be
|
[] |
no_license
|
khanhdk0000/Mqtt-Web-Socket
|
437777c740c68d4197353e334f6fe6a629094afd
|
4f9e49a3817baa9ebc4e4f8dcffc21b6ea9d0134
|
refs/heads/master
| 2023-06-20T17:08:09.447381
| 2021-06-08T17:42:37
| 2021-06-08T17:42:37
| 375,090,458
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,189
|
py
|
from flask import Flask
from flask_sock import Sock
import time
app = Flask(__name__)
sock = Sock(app)
import threading
BROKER = 'io.adafruit.com'
USER = 'khanhdk0000'
PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
TOPIC = 'khanhdk0000/feeds/'
LIGHT = 'light'
SOUND = 'sound'
TEMP = 'temp'
LCD = 'iot_led'
BUZZER = 'buzzer'
########
# USER = 'CSE_BBC'
# PASSWORD = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC = 'CSE_BBC/feeds/'
# USER1 = 'CSE_BBC1'
# PASSWORD1 = 'aio_FfID10QWNVSKUC2j15nLtOSeckin'
# TOPIC1 = 'CSE_BBC1/feeds/'
# LIGHT = 'bk-iot-light'
# SOUND = 'bk-iot-sound'
# TEMP = 'bk-iot-temp-humid'
# LCD = 'bk-iot-lcd'
# BUZZER = 'bk-iot-speaker'
resLight = '"id":"12","name":"LIGHT","data":"0","unit":""'
prevLight = resLight
resTemp = '"id":"13","name":"SOUND","data":"0","unit":""'
prevTemp = resTemp
resSound = '"id":"7","name":"TEMP-HUMID","data":"0","unit":""'
prevSound = resSound
def mqttGet(user, password,topic,device):
import paho.mqtt.client as mqtt, pu
def on_connect(client, userdata, flags, rc):
print("Connected with result code "+str(rc))
if rc == 0:
print('good')
else:
print('no good')
def on_message(client, userdata, msg):
print(msg.topic+" "+str(msg.payload))
def on_disconnect(client, userdata, flags, rc=0):
print("Disconnected result code " + str(rc))
def on_message(client, userdata, message):
if device == LIGHT:
global resLight
message = str(message.payload.decode("utf-8"))
resLight = message
elif device == TEMP:
global resTemp
message = str(message.payload.decode("utf-8"))
resTemp = message
elif device == SOUND:
global resSound
message = str(message.payload.decode("utf-8"))
resSound = message
client = mqtt.Client()
client.username_pw_set(username=user,password=password)
client.on_connect = on_connect
client.on_disconnect = on_disconnect
client.on_message = on_message
client.connect(BROKER, 1883, 60)
client.subscribe(topic)
client.loop_forever()
t1 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + LIGHT, LIGHT))
t1.start()
t2 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + TEMP, TEMP))
t2.start()
t3 = threading.Thread(target=mqttGet, name=mqttGet, args=(USER, PASSWORD,TOPIC + SOUND, SOUND))
t3.start()
@sock.route('/light')
def reverse(ws):
global resLight, prevLight
while True:
if prevLight == resLight:
continue
else:
ws.send(resLight)
prevLight = resLight
@sock.route('/sound')
def reverse(ws):
global resSound, prevSound
while True:
if prevSound == resSound:
continue
else:
ws.send(resSound)
prevSound = resSound
@sock.route('/temp')
def reverse(ws):
global resTemp, prevTemp
while True:
if prevTemp == resTemp:
continue
else:
ws.send(resTemp)
prevTemp = resTemp
if __name__ == '__main__':
app.run(debug=True)
|
[
"khanhtran28092000@gmail.com"
] |
khanhtran28092000@gmail.com
|
58020d50d29701fd850fcf8ccf83bbf252227aba
|
4ee2ed5479e34c11e78b98ec2428c623c0075772
|
/bots/lrs/posedziai.py
|
96ffa01f3441c06324d71503b6e240f289883fec
|
[] |
no_license
|
sirex/databot-bots
|
7c46ed7a7e5a4b7b5d7d7ab9cc7f17b1301e3e0b
|
c2bc4d4d5a3cfffe35eabf0660790f5e9b81ce41
|
refs/heads/master
| 2020-04-07T02:48:37.782069
| 2018-06-02T12:56:40
| 2018-06-02T12:56:40
| 44,805,410
| 5
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,286
|
py
|
#!/usr/bin/env python3
import yaml
import botlib
from databot import define, task
def append_last_session(task):
last = max(task.source.rows(), default=None, key=lambda x: x.value['pradžia'])
if last:
task.target.append(last.key, last.value)
with open('settings.yml') as f:
settings = yaml.load(f)
cookies = settings['cookies']['www.lrs.lt']
pipeline = {
'pipes': [
define('pradžios-puslapiai', compress=True),
define('sesijų-sąrašas'),
define('sesijų-puslapiai', compress=True),
define('posėdžių-sąrašas'),
define('posėdžių-puslapiai', compress=True),
define('klausimų-sąrašas'),
define('klausimų-puslapiai', compress=True),
],
'tasks': [
# Pirmas puslapis
task('pradžios-puslapiai').daily().download(
'http://www.lrs.lt/sip/portal.show?p_r=15275&p_k=1', cookies=cookies, check='#page-content h1.page-title'
),
# Sesijų sąrašas
task('pradžios-puslapiai', 'sesijų-sąrašas').select([
'#page-content .tbl-default xpath:tr[count(td)=3]', (
'td[1] > a.link@href', {
'url': 'td[1] > a.link@href',
'pavadinimas': 'td[1] > a.link:text',
'pradžia': 'td[2]:text',
'pabaiga': 'td[3]:text',
},
),
]).dedup(),
task('sesijų-sąrašas', 'sesijų-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
# Paskutinė sesija
# Visada siunčiam paskutinę sisiją, kadangi ten gali būti naujų posėdžių.
task('sesijų-sąrašas', 'sesijų-sąrašas').daily().apply(append_last_session),
task('sesijų-sąrašas', 'sesijų-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
# Posėdžių sąrašas
task('sesijų-puslapiai', 'posėdžių-sąrašas').select([
'#page-content .tbl-default xpath:tr[count(td)=4]/td[2]/a', (
'@href', {
'url': '@href',
'tipas': ':text',
'data': 'xpath:../../td[1]/a/text()',
'darbotvarkė': 'xpath:../../td[3]/a/@href',
'priimti projektai': 'xpath:../../td[4]/a/@href',
},
),
], check='#page-content h1.page-title').dedup(),
task('posėdžių-sąrašas', 'posėdžių-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
# Svarstytų klausimų sąrašas
task('posėdžių-puslapiai', 'klausimų-sąrašas').select([
'#page-content .tbl-default xpath:tr[count(td)=3]', (
'td[3] > a@href', {
'url': 'td[3] > a@href',
'laikas': 'td[1]:text',
'numeris': 'td[2]:text',
'klausimas': 'td[3] > a:text',
'tipas': 'xpath:td[3]/text()?',
},
),
], check='.fakt_pos > .list.main li > a').dedup(),
task('klausimų-sąrašas', 'klausimų-puslapiai').download(cookies=cookies, check='#page-content h1.page-title'),
],
}
if __name__ == '__main__':
botlib.runbot(pipeline)
|
[
"sirexas@gmail.com"
] |
sirexas@gmail.com
|
6be72f888e1a08d62fc7d499a22b2a5afc8712d0
|
958c4e0cc47caf325bc0dfb54ad37d5e90ceb28b
|
/src/s17/taskmanager/interfaces.py
|
d2127491be017cbfa88368380140c538b55f26e2
|
[] |
no_license
|
simplesconsultoria/s17.taskmanager
|
75cf0acfa9b1525f6b2849270edf0b780cbb1483
|
9ff31d4bf7cce4708956397f616900ca4d83d3ed
|
refs/heads/master
| 2021-01-25T07:28:28.857133
| 2015-07-29T18:09:55
| 2015-07-29T18:09:55
| 5,602,482
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,859
|
py
|
# -*- coding: utf-8 -*-
from plone.app.textfield import RichText
from plone.directives import form
from s17.taskmanager import MessageFactory as _
from zope import schema
from zope.schema.vocabulary import SimpleTerm
from zope.schema.vocabulary import SimpleVocabulary
priorities = SimpleVocabulary([
SimpleTerm(value=1, title=_(u'High')),
SimpleTerm(value=2, title=_(u'Normal')),
SimpleTerm(value=3, title=_(u'Low')),
])
class ITaskPanel(form.Schema):
responsible = schema.Choice(
title=_(u'Responsible'),
description=_(''),
required=False,
vocabulary='plone.app.vocabularies.Users',
)
can_add_tasks = schema.List(
title=_(u'Who can add tasks?'),
description=_(''),
required=False,
value_type=schema.Choice(vocabulary='plone.app.vocabularies.Groups'),
)
class ITask(form.Schema):
title = schema.TextLine(
title=_(u'Title'),
description=_(''),
required=True,
)
responsible = schema.Choice(
title=_(u'Responsible'),
description=_(''),
required=False,
vocabulary='plone.app.vocabularies.Users',
)
priority = schema.Choice(
title=_(u'Priority'),
description=_(''),
required=True,
vocabulary=priorities,
default=2,
)
text = RichText(
title=_(u'Task Detail'),
description=_(''),
required=False,
)
initial_date = schema.Date(
title=_(u'Initial date'),
description=_(''),
required=False,
readonly=True,
)
end_date = schema.Date(
title=_(u'End date'),
description=_(''),
required=False,
readonly=True,
)
provided_date = schema.Date(
title=_(u'Expected date'),
description=_(''),
required=False,
)
|
[
"hector.velarde@gmail.com"
] |
hector.velarde@gmail.com
|
f4a15652109abf926330829d5155be89a22ea2db
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/purview/azure-purview-scanning/tests/test_smoke.py
|
cccf9d71ecc17eda6ab343d13d4a1c3bbbe34273
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
# coding: utf-8
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# -------------------------------------------------------------------------
from testcase import PurviewScanningTest, PurviewScanningPowerShellPreparer
from devtools_testutils import recorded_by_proxy
class TestPurviewScanningSmoke(PurviewScanningTest):
@PurviewScanningPowerShellPreparer()
@recorded_by_proxy
def test_basic_smoke_test(self, purviewscanning_endpoint):
client = self.create_client(endpoint=purviewscanning_endpoint)
response = client.data_sources.list_all()
result = [item for item in response]
assert len(result) >= 1
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
9d7ea0b3a6493a8f7d5db68f4107169567274d8f
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03463/s773991951.py
|
929d4cbd0719590df2e6984427d7f287fe852c8f
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 306
|
py
|
N,A,B = map(int,input().split())
#print(A,B)
for i in range(N):
if A+1<B:
A +=1
else:
A += -1
if A <1:
ans="Borys"
break
#print(i,A,B)
if A<B-1:
B += -1
else:
B += 1
if B >N:
ans="Alice"
break
#print(i,A,B)
else:
ans="Draw"
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
842ca3cbd506420ec21602475bff1e1984496e9f
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part010511.py
|
937406fd5f610cca55b5bb4a2d6afec74f258fa1
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,302
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher145294(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i3.1.3.1.0', 1, 1, None), Mul),
(VariableWithCount('i3.1.3.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher145294._instance is None:
CommutativeMatcher145294._instance = CommutativeMatcher145294()
return CommutativeMatcher145294._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 145293
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
fa17fafd30775f5c3d2aa23f4002d6aa9268cf9b
|
e06c7fd594c52425ab7fc5498c07ae14daf9578b
|
/api/common/encryption_service.py
|
af34eb1615955ea54f079a26447530fecaa2fb9d
|
[] |
no_license
|
rwheeler-7864/simplenight-api
|
bc35560eca1e1c25092a1bcdc4af1633367413b8
|
602646911a0155df5b70991d1445c10cee18cd33
|
refs/heads/master
| 2023-03-12T03:10:51.516499
| 2021-02-25T20:40:44
| 2021-02-25T20:40:44
| 342,370,358
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,692
|
py
|
import os
import secrets
from base64 import b64encode, b64decode
from typing import Optional
# noinspection PyPackageRequirements
from Crypto.Cipher import AES
# noinspection PyPackageRequirements
from Crypto.Util import Padding
from django.core.exceptions import ImproperlyConfigured
try:
from api.models.models import Feature
from api.common.request_context import get_request_context
except ImproperlyConfigured:
pass # Ignore in tests
class EncryptionService:
TEST_MODE_KEY = b"ABCDEFG012345678"
def __init__(self, encryption_key=None):
if encryption_key is not None:
self.encryption_key = bytes.fromhex(encryption_key)
else:
self.encryption_key = self._get_encryption_key()
def encrypt(self, clear_text: str) -> Optional[str]:
if clear_text is None:
return None
padded_clear_text = Padding.pad(clear_text.encode("utf-8"), AES.block_size)
return b64encode(self._get_cipher().encrypt(padded_clear_text)).decode("utf-8")
def decrypt(self, crypt_text: str) -> Optional[str]:
if crypt_text is None:
return None
clear_text = Padding.unpad(self._get_cipher().decrypt(b64decode(crypt_text)), AES.block_size)
return clear_text.decode("utf-8")
@staticmethod
def generate_encryption_key():
return secrets.token_bytes(16).hex()
def _get_cipher(self):
key = self.encryption_key
return AES.new(key, AES.MODE_CBC, key)
def _get_encryption_key(self):
encoded_key = os.getenv("ENCRYPTION_KEY")
if not encoded_key:
return self.TEST_MODE_KEY
return bytes.fromhex(encoded_key)
|
[
"randmwheeler@gmail.com"
] |
randmwheeler@gmail.com
|
40b67939e03b48cfcbe364e11cb77e642e791485
|
747135fab93554fac11d6c2184470d4bf2701d31
|
/style_guide/source/conf.py
|
cc3774d7c44179c0bf106e4354b8ccf3ad181cc7
|
[
"CC-BY-3.0"
] |
permissive
|
dhutty/chef-docs
|
5985249fce8a8b0fbaaf256830fbdf43a5ec9d6e
|
661c72f0e0405b4cec223bc0def67cd598035070
|
refs/heads/master
| 2021-01-18T00:11:51.224491
| 2014-05-16T21:36:35
| 2014-05-16T21:36:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,537
|
py
|
# -*- coding: utf-8 -*-
#
# Chef documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 22 13:50:49 2012.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates', '../../_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'style_guide'
# General information about the project.
project = u'Style Guide'
copyright = u'This work is licensed under a Creative Commons Attribution 3.0 Unported License.'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# release = '0.0.1-1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'emacs'
# highlight_language = 'ruby'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# A string of reStructuredText that will be included at the beginning of every source file that is read.
rst_prolog = """
.. include:: ../../swaps/swap_descriptions.txt
.. include:: ../../swaps/swap_names.txt
.. include:: ../../swaps/swap_notes.txt
"""
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'chef'
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['../../_themes/']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Style Guide"
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "../../images/chef_logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = "chef.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# This setting is being used by Chef to override a version # stamp inserted
# at every page bottom, using a string. For example: chef-client 11.6.x. Or:
# Enterprise Chef Server 11.0.x. And yeah, this is just a hack, but it's the
# hack that achieved the desired behavior. Plus, there's 0% chance that we'll
# ever want to insert a datetime stamp in the docs.
html_last_updated_fmt = 'Style Guide, version 1.0.0'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'**': ['localtoc.html', 'relations.html'],
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {
'search': 'chef_search.html',
}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = False
# If true, the index is split into individual pages for each letter.
html_split_index = False
# If true, links to the reST sources are added to the pages. Leave this one as True
# for the style guide, in case people want to see the reST used in the style guide.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = False
# This is set to "False" because we don't want to show the default copyright, but
# do want to show the custom string defined by the "copyright" general setting (above).
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'StyleGuide'
|
[
"jamescott@opscode.com"
] |
jamescott@opscode.com
|
8a7bac217e3dfa9c44fa5647150501862b97aa9b
|
cbda89443b351bb2047180dad4e300c13dc3df7f
|
/Crystals/Morpurgo_sp_outer/Jobs/TIPS_Pc/TIPS_Pc_anion_neut_inner2_outer1/TIPS_Pc_anion_neut_inner2_outer1.py
|
6fd3f3b888a68c5738ebb4a4af6dfc566150231f
|
[] |
no_license
|
sheridanfew/pythonpolarisation
|
080f52979f98d26360a46412a10c8e3f51ee4549
|
178e2684e9a239a8e60af5f7b1eb414ac5f31e92
|
refs/heads/master
| 2021-07-10T01:07:40.978790
| 2021-03-11T16:56:37
| 2021-03-11T16:56:37
| 96,101,351
| 0
| 0
| null | 2017-07-03T13:37:06
| 2017-07-03T10:54:52
| null |
UTF-8
|
Python
| false
| false
| 6,691
|
py
|
# Python 2 driver script: compute polarisation energies and reorganisation
# energies for a TIPS-pentacene anion embedded in a neutral crystal
# (inner region 2 TVs, outer region 1 TV). Results are written to CSV /
# .dat files in the working directory.
import sys
sys.path.append('../../../../../')
from BasicElements import *
from BasicElements.Register import GetRegister
from BasicElements.MoleculeFactory import ReadMoleculeType
from BasicElements.MoleculeFactory import GetMolecule
from BasicElements.Crystal import *
from Polarizability.GetDipoles import get_dipoles,split_dipoles_onto_atoms
from Polarizability import *
from Polarizability.GetEnergyFromDips import *
from Polarizability.JMatrix import JMatrix
import numpy as np
from math import *
from time import gmtime, strftime
import os
# Timestamps are printed throughout so long runs can be profiled from logs.
print strftime("%a, %d %b %Y %X +0000", gmtime())
name='TIPS_Pc_anion_neut_inner2_outer1'
#For crystals here, all cubic and centred at centre
insize=2
#number of TVs in each dir central mol is from edge of inner region
outsize=1
# Central (charged), surrounding (polarisable neutral) and outer
# (single-point neutral) molecule definitions.
mols_cen=['TIPS_Pc_anion_aniso_cifstruct_chelpg.xyz']
mols_sur=['TIPS_Pc_neut_aniso_cifstruct_chelpg.xyz']
mols_outer=['sp_TIPS_Pc_neut.xyz']
#From cif:
'''
TIPS
data_k01029
_cell_length_a 7.5650(15)
_cell_length_b 7.7500(15)
_cell_length_c 16.835(3)
_cell_angle_alpha 89.15(3)
_cell_angle_beta 78.42(3)
_cell_angle_gamma 83.63(3)
_cell_volume 960.9(3)
'''
#Get translation vectors:
# Lattice constants converted from Angstrom to Bohr (divide by the Bohr
# radius, 0.52917... A); angles converted from degrees to radians.
a=7.565015/0.5291772109217
b=7.750015/0.5291772109217
c=16.8353/0.5291772109217
alpha=89.153*(pi/180)
beta=78.423*(pi/180)
gamma=83.633*(pi/180)
cif_unit_cell_volume=960.9/(a*b*c*(0.5291772109217**3))
# Dimensionless triclinic cell-volume factor (V / (a*b*c)).
cell_volume=sqrt(1 - (cos(alpha)**2) - (cos(beta)**2) - (cos(gamma)**2) + (2*cos(alpha)*cos(beta)*cos(gamma)))
#Converts frac coords to carts
matrix_to_cartesian=np.matrix( [[a, b*cos(gamma), c*cos(beta)],
    [0, b*sin(gamma), c*(cos(alpha) - cos(beta)*cos(gamma))/sin(gamma)],
    [0, 0, c*cell_volume/sin(gamma)]])
#carts to frac
matrix_to_fractional=matrix_to_cartesian.I
#TVs, TV[0,1,2] are the three translation vectors.
TV=matrix_to_cartesian.T
# Interaction cutoff (Bohr, presumably) passed to the J-matrix / dipole solver.
cut=8.0
totsize=insize+outsize
#number of TVs in each dir nearest c inner mol is from edge of outer region
cenpos=[totsize,totsize,totsize]
length=[2*totsize+1,2*totsize+1,2*totsize+1]
maxTVs=insize
outer_maxTVs=insize+outsize
#for diamond outer, don't specify for cube and will fill to cube edges.
print 'name: ',name,'mols_cen: ', mols_cen,' mols_sur: ',mols_sur,' TVs: ', TV
# Place Molecules
prot_neut_cry=Crystal(name=name,mols_cen=mols_cen,mols_sur=mols_sur,cenpos=cenpos,length=length,TVs=TV,maxTVs=maxTVs,mols_outer=mols_outer,outer_maxTVs=outer_maxTVs)
#prot_neut_cry._mols contains all molecules.
#mols[0] contains a list of all molecules in position a, mols[1] all mols in pos'n b, etc.
#mols[0][x,y,z] contains molecule a in position x,y,z
#mols may as such be iterated over in a number of ways to consider different molecules.
prot_neut_cry().print_posns()
#Calculate Properties:
print strftime("%a, %d %b %Y %X +0000", gmtime())
# External field is zero: we only want the crystal's self-polarisation.
E0 = np.matrix([0.,0.,0.])
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc jm'
jm = JMatrix(cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Calc dips:'
d = get_dipoles(E0=E0,jm=jm._m,cutoff=cut)
print strftime("%a, %d %b %Y %X +0000", gmtime())
Efield = get_electric_field(E0)
potential = get_potential()
print strftime("%a, %d %b %Y %X +0000", gmtime())
#print 'dips', d
print 'splitting dips onto atoms'
split_d = split_dipoles_onto_atoms(d)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'summing dips:'
tot = np.matrix([0.,0.,0.])
for dd in split_d:
    tot += dd
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'total dip moment', tot
# Energy components converted from Hartree to eV (x 27.211).
Uqq = np.multiply(get_U_qq(potential=potential),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqq', Uqq
Uqd = np.multiply(get_U_qdip(dips=d,Efield=Efield),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Uqd', Uqd
Udd = np.multiply(get_U_dipdip(jm=jm._m,dips=d.T),27.211)
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Udd', Udd
energyev = Udd+Uqd+Uqq
print 'energyev', energyev
energy=energyev/27.211
print strftime("%a, %d %b %Y %X +0000", gmtime())
print 'Making .dat cross sections for gnuplot'
# print TVs
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_TVs.dat' % name, 'w')
TVstr=str(str(TV[0,0]) + ' ' + str(TV[0,1]) + ' ' + str(TV[0,2]) + '\n' + str(TV[1,0]) + ' ' + str(TV[1,1]) + ' ' + str(TV[1,2]) + '\n' + str(TV[2,0]) + ' ' + str(TV[2,1]) + ' ' + str(TV[2,2])+ '\n')
f.write(TVstr)
f.flush()
f.close()
# print dipoles
if not os.path.exists('Dips_Posns_TVs'): os.makedirs('Dips_Posns_TVs')
f = open('Dips_Posns_TVs/%s_dipoles.dat' % name, 'w')
for dd in split_d:
    dstr=str(dd)
    f.write(dstr)
    f.write('\n')
f.flush()
f.close()
# print properties for charge in centrepos
time=strftime("%a, %d %b %Y %X +0000", gmtime())
f = open('%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\tenergyev\tUqq\tUqd\tUdd\tTotdip_x\tTotdip_y\tTotdip_z')
f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,energyev,Uqq,Uqd,Udd,tot[0,0],tot[0,1],tot[0,2]))
f.flush()
f.close()
# print header for reorgs
f = open('reorg_energies_%s_properties.csv' % name, 'w')
f.write ('time\tname\tmols_cen\tmols_sur\tmols_outer\tinsize\toutsize\ta\tb\tc\tmolincell\tReorg(eV)')
f.flush()
f.close()
# REORGANISATION ENERGIES
#Note that this assumes a cube, and values for which
# NOTE: a, b, c below shadow the lattice constants defined earlier; from
# here on they are integer cell indices, not lengths.
for dist in range(0,(length[0]/2)+1,1):
    print '\n\nDIST: ', dist, '\n'
    for a in range(prot_neut_cry()._cenpos[0]-dist,prot_neut_cry()._cenpos[0]+dist+1,1):
        for b in range(prot_neut_cry()._cenpos[1]-dist,prot_neut_cry()._cenpos[1]+dist+1,1):
            for c in range(prot_neut_cry()._cenpos[2]-dist,prot_neut_cry()._cenpos[2]+dist+1,1):
                print strftime("%a, %d %b %Y %X +0000", gmtime())
                print 'a,b,c',a,b,c
                for molincell in range(0,len(prot_neut_cry()._mols),1):
                    # Reorganisation energy for moving the charge from the
                    # central cell to cell (a, b, c), molecule `molincell`.
                    prot_neut_cry().calc_reorg(a1=prot_neut_cry()._cenpos[0],b1=prot_neut_cry()._cenpos[1],c1=prot_neut_cry()._cenpos[2],molincell1=0,a2=a,b2=b,c2=c,molincell2=molincell,dips=d,oldUqd=Uqd)
                    print 'Reorg: ', prot_neut_cry()._reorgs[molincell][a][b][c]
                    # Appended (mode 'a') per molecule so partial results
                    # survive an interrupted run.
                    f = open('reorg_energies_%s_properties.csv' % name, 'a')
                    f.write ('\n%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s' % (time,name,mols_cen,mols_sur,mols_outer,insize,outsize,a,b,c,molincell,prot_neut_cry()._reorgs[molincell][a][b][c]))
                    f.flush()
                    f.close()
# Redo this and overwrite after each set to ensure we have some even if not all reorgs complete
prot_neut_cry().print_reorgs()
print 'Job Completed Successfully.'
|
[
"sheridan.few@gmail.com"
] |
sheridan.few@gmail.com
|
c4903fe6cc73ed9888fa791de56a6e121c6445d0
|
37fa222d2ce4b227dfeeae0053b5110c24f0c595
|
/17/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM4000_R_0-3.py
|
bce53377e69348a7eb6d837b7dc54ee875322bfe
|
[] |
no_license
|
colizz/fullRunII_ntuple
|
8fffe7893ad80804c25444534b80edf3f1a09f97
|
ec8c014e9502f12d060bf8198894f915adcee267
|
refs/heads/master
| 2020-08-04T07:02:35.210954
| 2019-09-30T00:47:37
| 2019-09-30T00:47:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,251
|
py
|
# CRAB3 job configuration: analysis of the WkkToWRadionToWWW M4000 R0-3
# 2017 MC sample. Values are consumed by the CRAB client, not executed here.
from WMCore.Configuration import Configuration
# Bookkeeping strings used when composing output paths (see the commented
# outLFNDirBase below).
name = 'WWW'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M4000_R0-3_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
# Jet-energy-correction text files shipped alongside the job to the worker nodes.
config.JobType.inputFiles = ['Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFchs.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK8PFPuppi.txt','Fall17_17Nov2017_V8_MC_L1FastJet_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L2Relative_AK4PFPuppi.txt','Fall17_17Nov2017_V8_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M4000-R0-3_TuneCP5_13TeV-madgraph/RunIIFall17MiniAODv2-PU2017_12Apr2018_94X_mc2017_realistic_v14-v2/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
# Split by input file, 5 files per job, over the whole dataset (-1).
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
#config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M4000_R0-3_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
|
[
"XXX@cern.ch"
] |
XXX@cern.ch
|
c29e43cc150ebaddaacdafbc5af0227a5f1666e4
|
23611933f0faba84fc82a1bc0a85d97cf45aba99
|
/google-cloud-sdk/lib/googlecloudsdk/api_lib/cloudkms/cryptokeyversions.py
|
fb554167e913c732d12bab736a12c9d70072871b
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] |
permissive
|
KaranToor/MA450
|
1f112d1caccebdc04702a77d5a6cee867c15f75c
|
c98b58aeb0994e011df960163541e9379ae7ea06
|
refs/heads/master
| 2021-06-21T06:17:42.585908
| 2020-12-24T00:36:28
| 2020-12-24T00:36:28
| 79,285,433
| 1
| 1
|
Apache-2.0
| 2020-12-24T00:38:09
| 2017-01-18T00:05:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,597
|
py
|
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for CryptoKeyVersions."""
from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
def SetState(version_ref, state):
  """Set the state (e.g. ENABLED or DISABLED) of a CryptoKeyVersion.

  Args:
    version_ref: A resources.Resource for the CryptoKeyVersion.
    state: an apitools enum for ENABLED or DISABLED state.

  Returns:
    The updated CryptoKeyVersion.
  """
  messages = cloudkms_base.GetMessagesModule()
  updated_version = messages.CryptoKeyVersion(state=state)
  request = messages.CloudkmsProjectsLocationsKeyRingsCryptoKeysCryptoKeyVersionsPatchRequest(  # pylint: disable=line-too-long
      projectsId=version_ref.projectsId,
      locationsId=version_ref.locationsId,
      keyRingsId=version_ref.keyRingsId,
      cryptoKeysId=version_ref.cryptoKeysId,
      cryptoKeyVersionsId=version_ref.cryptoKeyVersionsId,
      updateMask='state',
      cryptoKeyVersion=updated_version)
  client = cloudkms_base.GetClientInstance()
  versions_service = client.projects_locations_keyRings_cryptoKeys_cryptoKeyVersions
  return versions_service.Patch(request)
|
[
"toork@uw.edu"
] |
toork@uw.edu
|
3c5e0cf2977035d9ba3c53d6d0a367c274d8c0f1
|
9320f83e6006a7879df2fe9f3a16620b66becf65
|
/src/n8scripts/n8pushover.py
|
956a3a822de251e2166d398117b5f925639a112a
|
[
"MIT"
] |
permissive
|
n8henrie/n8scripts
|
e34a8d06252e30044815af401560322278ef23b2
|
7b79b2c4b7c5e6ef23aad4c2181f3b3886cdd7a8
|
refs/heads/master
| 2021-01-25T11:02:55.251973
| 2019-03-12T20:51:10
| 2019-03-12T20:51:10
| 93,908,287
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,325
|
py
|
"""n8pushover.py
A quick implementation of the Pushover API in Python, using an envvar or the
keyring module (desktop) or Pythonista's keychain module (iOS) to store
credentials.
Usage:
from n8scripts.n8pushover import push
push("It's alive!")
push("This one has a title.", title="My awesome title.")
"""
import __main__
import argparse
import http
import os.path
import subprocess
import sys
import typing
import urllib.parse
import urllib.request
class OSXSecurity:
    """Uses the MacOS `security` command following the keyring library API."""

    def __init__(self):
        """Ensure platform is `darwin`; raise OSError otherwise."""
        if sys.platform != "darwin":
            raise OSError(f"{self.__class__ } can only run on MacOS (darwin)")

    def get_password(self, service: str, account: str) -> str:
        """Use keychain API to get password for service / account.

        The command is passed as an argv list (not a split format string),
        so service/account values containing spaces are handled correctly.
        """
        argv = [
            "security", "find-generic-password",
            "-s", service, "-a", account, "-w",
        ]
        process = subprocess.run(argv, stdout=subprocess.PIPE)
        return process.stdout.decode("utf8").strip()
# Resolve a credentials backend under the name ``keychain``: Pythonista's
# keychain module when available, otherwise the macOS ``security`` wrapper.
# The original code retried the exact same ``import keychain`` in a nested
# try/except; a single attempt is behaviorally equivalent.
# NOTE(review): on a non-darwin host without the keychain module this
# fallback raises OSError at import time — confirm intended; a keyring
# fallback may have been intended per the module docstring.
try:
    import keychain
except ImportError:
    keychain = OSXSecurity()
def get_credentials() -> typing.Tuple[str, str]:
    """Return the Pushover ``(user, api_token)`` pair.

    Prefers the PUSHOVER_USER / PUSHOVER_API_TOKEN environment variables;
    if either is missing, both values come from the keychain backend.
    """
    env = os.environ
    if "PUSHOVER_USER" in env and "PUSHOVER_API_TOKEN" in env:
        return env["PUSHOVER_USER"], env["PUSHOVER_API_TOKEN"]
    return (
        keychain.get_password("pushover", "user"),
        keychain.get_password("pushover", "api_token"),
    )
def push(
    message: str,
    user: typing.Optional[str] = None,
    api_token: typing.Optional[str] = None,
    device: typing.Optional[str] = None,
    title: typing.Optional[str] = None,
    url: typing.Optional[str] = None,
    url_title: typing.Optional[str] = None,
    priority: typing.Optional[str] = None,
    timestamp: typing.Optional[str] = None,
    sound: typing.Optional[str] = None,
) -> typing.Union[http.client.HTTPResponse, typing.BinaryIO]:
    """Pushes the notification.

    API Reference: https://pushover.net/api

    Args:
        message: Your message
        user: The user/group key (not e-mail address) of your user (or you),
            viewable when logged into our dashboard (often referred to as
            USER_KEY in our documentation and code examples)
        api_token: Your application's API token
        device: Your user's device name to send the message directly to that
            device, rather than all of the user's devices
        title: Your message's title, otherwise your app's name is used
        url: A supplementary URL to show with your message
        url_title: A title for your supplementary URL, otherwise just the URL
            is shown
        priority: Send as -2 to generate no notification/alert, -1 to always
            send as a quiet notification, 1 to display as high-priority and
            bypass the user's quiet hours, or 2 to also require confirmation
            from the user
        timestamp: A Unix timestamp of your message's date and time to
            display to the user, rather than the time your message is
            received by our API
        sound: The name of one of the sounds supported by device clients to
            override the user's default sound choice

    Returns:
        HTTP response from API call
    """
    # Fill in credentials from env vars / keychain when not given explicitly.
    if user is None or api_token is None:
        user, api_token = get_credentials()
    api_url = "https://api.pushover.net/1/messages.json"
    # Default title: the calling script's filename, falling back to the
    # package name when run interactively (no __main__.__file__).
    if title is None:
        if getattr(__main__, "__file__", None):
            title = os.path.basename(__main__.__file__)
        else:
            title = "n8scripts"
    payload_dict = {
        "token": api_token,
        "user": user,
        "message": message,
        "device": device,
        "title": title,
        "url": url,
        "url_title": url_title,
        "priority": priority,
        "timestamp": timestamp,
        "sound": sound,
    }
    # Drop unset (falsy) fields so the API only sees provided parameters.
    payload = urllib.parse.urlencode({k: v for k, v in payload_dict.items() if v})
    # NOTE(review): the context manager closes the response before it is
    # returned, so callers likely cannot read its body — confirm whether the
    # response object is meant to be consumed by callers.
    with urllib.request.urlopen(api_url, data=payload.encode()) as resp:
        return resp
def cli() -> None:
    """Collect command line args and run push.

    Each flag mirrors a keyword parameter of :func:`push`; unset flags are
    suppressed (argument_default=SUPPRESS) so push() keeps its own defaults.
    """
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        argument_default=argparse.SUPPRESS,
    )
    parser.add_argument("message", help="Your message")
    parser.add_argument(
        "-u",
        "--user",
        help=("The user/group key (not e-mail address) of " "your user (or you)"),
    )
    parser.add_argument("-a", "--api-token", help="Your application's API token")
    parser.add_argument(
        "-d",
        "--device",
        help=(
            "Your user's device name to send the message "
            "directly to that device, rather than all of "
            "the user's devices (multiple devices may be "
            "separated by a comma)"
        ),
    )
    parser.add_argument(
        "-t",
        "--title",
        help=("Your message's title, otherwise your app's " "name is used"),
    )
    parser.add_argument(
        "-k", "--url", help="A supplementary URL to show with your message"
    )
    parser.add_argument(
        "-l",
        "--url_title",
        help=("A title for your supplementary URL, otherwise " "just the URL is shown"),
    )
    parser.add_argument(
        "-p",
        "--priority",
        help=(
            "Send as -2 to generate no notification/alert, "
            "-1 to always send as a quiet notification, 1 "
            "to display as high-priority and bypass the "
            "user's quiet hours, or 2 to also require "
            "confirmation from the user"
        ),
    )
    parser.add_argument(
        "-m",
        "--timestamp",
        help=(
            "A Unix timestamp of your message's date and "
            "time to display to the user, rather than the "
            "time your message is received by our API"
        ),
    )
    parser.add_argument(
        "-s",
        "--sound",
        help=(
            "The name of one of the sounds supported by "
            "device clients to override the user's default "
            "sound choice"
        ),
    )
    namespace = parser.parse_args()
    # Forward only the flags the user actually set.
    args = {k: v for k, v in vars(namespace).items() if v}
    push(**args)
# Script entry point: parse CLI flags and send the notification.
if __name__ == "__main__":
    cli()
|
[
"nate@n8henrie.com"
] |
nate@n8henrie.com
|
9e5d840e0bcb9bf3ab4aa6ccd3172973d8c3ce34
|
902e0bcd7abd0eafb1daf820f5009e632bfe9141
|
/courses/migrations/0001_initial.py
|
3d65d04b748eb39bcd4d755cb7a4d05f62aacabd
|
[] |
no_license
|
ihfazhillah/educa-lms
|
1ba4aebcfc7b68b6b80c3cacff0eeabb3024344b
|
e0c4ef46a147cc187297291db5adf78cc7da617d
|
refs/heads/master
| 2020-03-28T09:38:34.998747
| 2018-09-22T16:03:49
| 2018-09-22T16:03:49
| 148,048,405
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,027
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial migration for the courses app: creates the
    # Course, Module and Subject tables and wires up their foreign keys.
    # Django migrations are append-only history — edit only via new
    # migrations, never by changing these operations.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(max_length=200, unique=True)),
                ('overview', models.TextField(blank=True)),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('owner', models.ForeignKey(related_name='courses_created', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ('-created',),
            },
        ),
        migrations.CreateModel(
            name='Module',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('title', models.CharField(max_length=200)),
                ('description', models.TextField(blank=True)),
                ('course', models.ForeignKey(related_name='courses', to='courses.Course')),
            ],
        ),
        migrations.CreateModel(
            name='Subject',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('title', models.CharField(max_length=200)),
                ('slug', models.SlugField(max_length=200, unique=True)),
            ],
            options={
                'ordering': ('title',),
            },
        ),
        # Added after Course creation so the Subject table exists first.
        migrations.AddField(
            model_name='course',
            name='subject',
            field=models.ForeignKey(related_name='courses', to='courses.Subject'),
        ),
    ]
|
[
"mihfazhillah@gmail.com"
] |
mihfazhillah@gmail.com
|
5f9bd9bb49499a97c6bc43ca09d6cbf41a34a357
|
68c49c51d04aa8c87e673784659088c1a5e4aeea
|
/database_reader/physionet_databases/capslpdb.py
|
736f9190ce6652beb0c6940ec11e2e8869438217
|
[
"MIT"
] |
permissive
|
wenh06/database_reader
|
9b1773c824ab62213e27f9e8c4144c098a13e410
|
784ea882e78791979ab020da403b97ea50b9d075
|
refs/heads/master
| 2023-06-05T09:32:48.164050
| 2021-06-24T09:45:33
| 2021-06-24T09:45:33
| 370,729,791
| 0
| 0
|
MIT
| 2021-05-25T14:52:58
| 2021-05-25T14:52:57
| null |
UTF-8
|
Python
| false
| false
| 4,410
|
py
|
# -*- coding: utf-8 -*-
"""
"""
import os
from datetime import datetime
from typing import Union, Optional, Any, List, NoReturn
from numbers import Real
import wfdb
import numpy as np
np.set_printoptions(precision=5, suppress=True)
import pandas as pd
from ..utils.common import (
ArrayLike,
get_record_list_recursive,
)
from ..base import PhysioNetDataBase
__all__ = [
"CAPSLPDB",
]
class CAPSLPDB(PhysioNetDataBase):
    """ NOT finished,

    CAP Sleep Database

    ABOUT capslpdb
    --------------
    1. contains 108 polysomnographic (PSG) recordings, including 16 healthy subjects and 92 pathological recordings, in EDF format, NOT the usual wfdb .dat format
    2. The 92 pathological recordings include 40 recordings of patients diagnosed with nocturnal frontal lobe epilepsy (NFLE), 22 affected by REM behavior disorder (RBD), 10 with periodic leg movements (PLM), 9 insomniac, 5 narcoleptic, 4 affected by sleep-disordered breathing (SDB) and 2 by bruxism

    NOTE
    ----
    1. background knowledge aboute CAP:
    The Cyclic Alternating Pattern (CAP) is a periodic EEG activity occurring during NREM sleep. It is characterized by cyclic sequences of cerebral activation (phase A) followed by periods of deactivation (phase B) which separate two successive phase A periods with an interval <1 min. A phase A period and the following phase B period define a CAP cycle, and at least two CAP cycles are required to form a CAP sequence

    ISSUES
    ------

    Usage
    -----
    1. sleep stage
    2. sleep cyclic alternating pattern

    References
    ----------
    [1] https://physionet.org/content/capslpdb/1.0.0/
    """

    def __init__(self, db_dir:Optional[str]=None, working_dir:Optional[str]=None, verbose:int=2, **kwargs:Any) -> NoReturn:
        """
        Parameters
        ----------
        db_dir: str, optional,
            storage path of the database
            if not specified, data will be fetched from Physionet
        working_dir: str, optional,
            working directory, to store intermediate files and log file
        verbose: int, default 2,
            log verbosity
        kwargs: auxilliary key word arguments
        """
        super().__init__(db_name="capslpdb", db_dir=db_dir, working_dir=working_dir, verbose=verbose, **kwargs)
        self.data_ext = "edf"
        self.ann_ext = "st"
        self.alias_ann_ext = "txt"
        self.fs = None  # psg data with different frequencies for each signal
        self._ls_rec()

    def _ls_rec(self, local:bool=True) -> NoReturn:
        """ finished, checked,

        find all records (relative path without file extension),
        and save into `self._all_records` for further use

        Parameters
        ----------
        local: bool, default True,
            if True, read from local storage, prior to using `wfdb.get_record_list`
        """
        try:
            super()._ls_rec(local=local)
        except Exception:
            # FIX: narrowed from a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. Fall back to the static record
            # list published for capslpdb v1.0.0.
            self._all_records = [
                "brux1", "brux2",
                "ins1", "ins2", "ins3", "ins4", "ins5", "ins6", "ins7", "ins8", "ins9",
                "n10", "n11", "n12", "n13", "n14", "n15", "n16",
                "n1", "n2", "n3", "n4", "n5", "n6", "n7", "n8", "n9",
                "narco1", "narco2", "narco3", "narco4", "narco5",
                "nfle10", "nfle11", "nfle12", "nfle13", "nfle14", "nfle15", "nfle16",
                "nfle17", "nfle18", "nfle19", "nfle1", "nfle20", "nfle21", "nfle22",
                "nfle23", "nfle24", "nfle25", "nfle26", "nfle27", "nfle28", "nfle29",
                "nfle2", "nfle30", "nfle31", "nfle32", "nfle33", "nfle34", "nfle35",
                "nfle36", "nfle37", "nfle38", "nfle39", "nfle3", "nfle40", "nfle4",
                "nfle5", "nfle6", "nfle7", "nfle8", "nfle9",
                "plm10", "plm1", "plm2", "plm3", "plm4", "plm5", "plm6", "plm7", "plm8", "plm9",
                "rbd10", "rbd11", "rbd12", "rbd13", "rbd14", "rbd15", "rbd16", "rbd17",
                "rbd18", "rbd19", "rbd1", "rbd20", "rbd21", "rbd22", "rbd2", "rbd3", "rbd4",
                "rbd5", "rbd6", "rbd7", "rbd8", "rbd9",
                "sdb1", "sdb2", "sdb3", "sdb4",
            ]

    def get_subject_id(self, rec) -> int:
        """Not implemented for this database yet."""
        raise NotImplementedError

    def database_info(self) -> NoReturn:
        """Print the class docstring (database description)."""
        print(self.__doc__)
|
[
"wenh06@gmail.com"
] |
wenh06@gmail.com
|
389460e272923131109704dd69233dfb92abaa37
|
a4a01e251b194f6d3c6654a2947a33fec2c03e80
|
/PythonWeb/Flask/1809Flask/Flaskday02/flaskdemo02/run.py
|
dbf71b23902098d9f244460512522d83331dd8f8
|
[] |
no_license
|
demo112/1809
|
033019043e2e95ebc637b40eaf11c76bfd089626
|
e22972229e5e7831dce2aae0b53ce19a6e3bb106
|
refs/heads/master
| 2020-04-09T07:10:49.906231
| 2019-02-27T13:08:45
| 2019-02-27T13:08:45
| 160,143,869
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,037
|
py
|
from flask import Flask
from flask import render_template
app = Flask(__name__)
@app.route("/01-selftemp")
def selftemp():
    """Return a hand-assembled HTML page, bypassing the template engine."""
    fragments = (
        "<!doctype html>",
        "<html>",
        "<head>",
        "<title>",
        "我自己的模版",
        "</title>",
        "</head>",
        "<body>",
        "<h1 color=red>",
        "这是我第一个模版",
        "</h1>",
        "</body>",
        "</html>",
    )
    return "".join(fragments)
@app.route("/02-temp")
def template_views():
    """Render 02-temp.html with a few scalar context values."""
    # html = render_template('index.html')
    html = render_template(
        '02-temp.html',
        name="wangwc",
        age=35,
        gender="male")
    return html
@app.route("/03-temp")
def template_views2():
    """Render 03-temp.html with song metadata passed as individual kwargs."""
    html = render_template('03-temp.html', name1='歌名:《绿光》', name2='作词:宝强', name3='作词:奶亮', name4='演唱:羽凡')
    return html
@app.route("/04-temp")
def template_views3():
    """Render 04-temp.html with the song fields bundled into one mapping.

    Equivalent to the locals()-based version: the template receives
    ``params`` with keys name1..name4.
    """
    song_fields = {
        'name1': '歌名:《绿光》',
        'name2': '作词:宝强',
        'name3': '作词:奶亮',
        'name4': '演唱:羽凡',
    }
    return render_template('04-temp.html', params=song_fields)
@app.route("/04-var")
def var():
    """Demo route: expose assorted local variables (scalar, list, tuple,
    dict, object) to 04-var.html via ``locals()``.

    FIX: removed a stray ``pass`` no-op that preceded the body; behavior is
    otherwise unchanged (including the debug print of locals()).
    """
    uname = '他爸爸'
    delay = 880
    lis = ['阿珂', '兰陵王', ' 孙悟空']
    tup = ('阿珂', '兰陵王', ' 孙悟空')
    dic = {
        'AK': '阿珂', 'LLW': '兰陵王', 'WZJ': ' 孙悟空'
    }
    game = Game()
    print(locals())
    return render_template('04-var.html', params=locals())
@app.route("/05-filter")
def filter1():
    """Render 05-filter.html; the template applies Jinja2 filters to ustr."""
    ustr = "this is a test string"
    return render_template("05-filter.html", params=locals())
@app.route("/05-macro")
def marco():
    """Render 05-macro.html, which demonstrates Jinja2 macros over a list.

    NOTE: the function name is presumably a typo for ``macro``; kept as-is
    since renaming would change the Flask endpoint name.
    """
    lis = ["孙悟空", "西门庆", "刘姥姥", "小乔"]
    return render_template("05-macro.html", list=lis)
@app.route("/image")
def image():
    """Render the static image demo page."""
    return render_template("image.html")
class Game(object):
    """Tiny demo object exposed to the 04-var template."""

    # Class-level attribute shared by all instances.
    group = '深渊'

    def prt(self):
        """Return the test label concatenated with the group name."""
        label = "测试内容"
        return label + self.group
# Development entry point; debug=True enables the reloader and debugger.
if __name__ == "__main__":
    app.run(debug=True)
|
[
"huafengdongji@hotmail.com"
] |
huafengdongji@hotmail.com
|
aa7534f4669baf9ee235e5cc5d793cae77d48129
|
4a4352800a7d9f26c4f2cd6c7e00a54e4fdc6517
|
/Filters/BaseFilter.py
|
d5566bee291fef7664eb1e5b088c7e1c8561d69e
|
[] |
no_license
|
nag92/ExoServer
|
914e9b8b03a0c29211d1c1b6f22113cbf8924ad0
|
d9006db8cf821fe0c552df13958797456d7ff0e2
|
refs/heads/master
| 2023-01-23T17:59:16.080404
| 2020-10-29T17:24:07
| 2020-10-29T17:24:07
| 261,233,186
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
import abc
from Sensors import Sensor
class BaseFilter(object):
    """Common base class for signal filters wrapping a sensor."""

    def __init__(self, sensor):
        """Store the wrapped sensor and start with an empty value history.

        :param sensor: sensor
        :type sensor: Sensor.Sensor
        """
        self.sensor = sensor
        self.values = list()

    @abc.abstractmethod
    def update(self, value):
        """Feed a new reading through the filter.

        Subclasses override this; the base implementation is the identity
        filter and simply echoes its input.

        :param value: new values
        :return: updated value
        """
        return value
|
[
"nagoldfarb@wpi.edu"
] |
nagoldfarb@wpi.edu
|
0cbbf3590949fb9230b951dfa529e4a582a7587d
|
176839e6f94e593fb957f0af1bd5682c95e44f8f
|
/exoplanet/theano_ops/celerite/factor_rev.py
|
552d24cd4b7691d849b6b32a0ecd977c8a84b86e
|
[
"MIT"
] |
permissive
|
Junjun1guo/exoplanet
|
8a0a9d4deb351744a78db54801c4a9d9834e7f7a
|
5df07b16cf7f8770f02fa53598ae3961021cfd0f
|
refs/heads/master
| 2020-05-17T17:51:14.836055
| 2019-04-26T20:10:28
| 2019-04-26T20:10:28
| 183,867,012
| 2
| 0
| null | 2019-04-28T06:38:30
| 2019-04-28T06:38:30
| null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division, print_function
__all__ = ["FactorRevOp"]
from .base_op import CeleriteBaseOp
class FactorRevOp(CeleriteBaseOp):
    """Theano op wrapping the C++ ``factor_rev`` kernel.

    Appears to be the reverse-mode (gradient) counterpart of the celerite
    factorization op — TODO confirm against factor_rev.cc. All behavior
    lives in CeleriteBaseOp and the referenced C++ source.
    """

    func_file = "./factor_rev.cc"  # C++ implementation compiled by the base op
    func_name = "APPLY_SPECIFIC(factor_rev)"
    num_input = 7  # number of input tensors
    output_ndim = (1, 2, 2, 2)  # ndim of each of the four outputs

    def __init__(self, J=-1):
        # J: width parameter forwarded to CeleriteBaseOp; -1 presumably means
        # "unspecified/infer" — verify against the base class.
        super(FactorRevOp, self).__init__(J=J)
|
[
"foreman.mackey@gmail.com"
] |
foreman.mackey@gmail.com
|
c00a910073398520cb97fc2609c3d5f4d8934baa
|
3af8dfb5bc0a759f7237f10504dd28dfc2489d7e
|
/api/allennlp_demo/roberta_sentiment_analysis/test_api.py
|
680638c082a6da1b492e39d0f2cf0f1dd9e3e925
|
[
"Apache-2.0"
] |
permissive
|
allenai/allennlp-demo
|
a710fca880b8de9d829790b7161fe8465deb15cc
|
afa862f1b473331f1157c1ee158ea202425fb10d
|
refs/heads/main
| 2023-08-31T22:20:03.464642
| 2023-01-20T20:01:13
| 2023-01-20T20:01:13
| 136,056,285
| 200
| 93
|
Apache-2.0
| 2023-01-16T17:14:38
| 2018-06-04T16:53:08
|
TypeScript
|
UTF-8
|
Python
| false
| false
| 373
|
py
|
from allennlp_demo.common.testing import ModelEndpointTestCase
from allennlp_demo.roberta_sentiment_analysis.api import RobertaSentimentAnalysisModelEndpoint
class TestRobertaSentimentAnalysisModelEndpoint(ModelEndpointTestCase):
    """Endpoint test for the RoBERTa sentiment-analysis demo."""

    # NOTE: instantiated at class-definition time, so importing this module
    # loads the model endpoint.
    endpoint = RobertaSentimentAnalysisModelEndpoint()
    predict_input = {"sentence": "a very well-made, funny and entertaining picture."}
|
[
"noreply@github.com"
] |
allenai.noreply@github.com
|
b8d5dad94992b55574b60c2e52fc6f49923f7f1c
|
a458e773d46ad88725f07bdf9ac07d9608ddfb47
|
/pages/views.py
|
a9240ed5511ee80c9ad873e5611146db5a21ccaa
|
[] |
no_license
|
SonerArslan2019/egitim_sitesi
|
82fd8dbe2bad593094a9caf85f06c7f86c96b064
|
8f5cabd5190334f47ef5beda10f8513be6ff9672
|
refs/heads/master
| 2023-03-15T09:18:38.771211
| 2021-03-15T18:13:46
| 2021-03-15T18:13:46
| 346,806,305
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,335
|
py
|
from django.shortcuts import render
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from courses.models import Course
from . forms import ContactForm
from django.urls import reverse_lazy
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.models import User
from teachers.models import Teacher
class IndexView(TemplateView):
    """Landing page: two latest available courses plus site-wide counts."""
    template_name = 'index.html'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context.update(
            courses=Course.objects.filter(available=True).order_by('-date')[:2],
            total_course=Course.objects.filter(available=True).count(),
            total_students=User.objects.count(),
            total_teachers=Teacher.objects.count(),
        )
        return context
#def index(request):
# return render(request, 'index.html')
class AboutView(TemplateView):
    """Static 'about' page."""
    template_name = 'about.html'
#def about(request):
# return render(request, 'about.html')
class ContactView(SuccessMessageMixin, FormView):
    """Contact page: renders ContactForm, saves valid submissions, then
    redirects back to itself with a success flash message."""
    template_name = 'contact.html'
    form_class = ContactForm
    success_url = reverse_lazy('contact')
    success_message = 'We received your request'

    def form_valid(self, form):
        # Persist the submission before the mixin redirects and queues
        # the success message.
        form.save()
        return super().form_valid(form)
|
[
"soner@arslanyapi.com.tr"
] |
soner@arslanyapi.com.tr
|
1198b591a26c7bda00f1a072d5be35b687aba6e0
|
5a3547772b61f7d1b3a81f76dd1397eb92c68e7b
|
/slbo/envs/mujoco/ant_task_env.py
|
e819bb593f60567ec02d372d834e8d5847400f92
|
[
"MIT"
] |
permissive
|
suen049/AdMRL
|
483440f0ded14e471d879b300da9afbab68fbe66
|
50a22d4d480e99125cc91cc65dfcc0df4a883ac6
|
refs/heads/master
| 2023-03-12T23:15:05.154003
| 2021-03-06T15:31:21
| 2021-03-06T15:31:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,636
|
py
|
import math
import numpy as np
from rllab.envs.mujoco import ant_task_env
from rllab.envs.base import Step
from slbo.envs import BaseModelBasedEnv
# Re-export the task config so callers can import it from this module.
AntTaskConfig = ant_task_env.AntTaskConfig


class AntTaskEnv(ant_task_env.AntTaskEnv, BaseModelBasedEnv):
    """Ant locomotion task with a model-based ``mb_step`` mirror of ``step``."""

    def get_current_obs(self):
        # Observation layout (trailing 3 entries are the torso COM velocity,
        # which mb_step relies on via next_states[..., -3:]).
        return np.concatenate([
            self.model.data.qpos.flat,  # 15
            self.model.data.qvel.flat,  # 14
            # np.clip(self.model.data.cfrc_ext, -1, 1).flat,  # 84
            self.get_body_xmat("torso").flat,  # 9
            self.get_body_com("torso"),  # torso center of mass (3 values; original comment said 9)
            self.get_body_comvel("torso"),  # 3
        ]).reshape(-1)

    def step(self, action):
        self.forward_dynamics(action)
        comvel = self.get_body_comvel("torso")
        # goal_velocity of -inf/+inf means "run as fast as possible" in the
        # negative/positive x direction; otherwise track the target speed.
        if self._task_config.goal_velocity == -math.inf:
            forward_reward = -1 * comvel[0]
        elif self._task_config.goal_velocity == math.inf:
            forward_reward = comvel[0]
        else:
            forward_reward = -np.abs(comvel[0] - self._task_config.goal_velocity) + 1.0
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(action / scaling))
        contact_cost = 0.
        # contact_cost = 0.5 * 1e-3 * np.sum(
        #     np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        state = self._state
        # Episode ends on non-finite state or when torso height leaves [0.2, 1.0].
        notdone = np.isfinite(state).all() and state[2] >= 0.2 and state[2] <= 1.0
        done = not notdone
        ob = self.get_current_obs()
        return Step(ob, float(reward), done)

    def mb_step(self, states: np.ndarray, actions: np.ndarray, next_states: np.ndarray):
        # Batched reward/termination computed from predicted next states;
        # must mirror step() above. COM velocity occupies the last 3 slots
        # of the observation (see get_current_obs).
        comvel = next_states[..., -3:]
        if self._task_config.goal_velocity == -math.inf:
            forward_reward = -1 * comvel[..., 0]
        elif self._task_config.goal_velocity == math.inf:
            forward_reward = comvel[..., 0]
        else:
            forward_reward = -np.abs(comvel[..., 0] - self._task_config.goal_velocity) + 1.0
        lb, ub = self.action_bounds
        scaling = (ub - lb) * 0.5
        ctrl_cost = 0.5 * 1e-2 * np.sum(np.square(actions / scaling), axis=-1)
        contact_cost = 0.
        # contact_cost = 0.5 * 1e-3 * np.sum(
        #     np.square(np.clip(self.model.data.cfrc_ext, -1, 1))),
        survive_reward = 0.05
        reward = forward_reward - ctrl_cost - contact_cost + survive_reward
        # NOTE: index 2 of the observation is assumed to be the torso height
        # (qpos z) — matches the qpos-first layout of get_current_obs.
        notdone = np.all([next_states[..., 2] >= 0.2, next_states[..., 2] <= 1.0], axis=0)
        return reward, 1. - notdone
|
[
"linzichuan12@163.com"
] |
linzichuan12@163.com
|
96ed4243504b965ffe7bb44a77193977ffd463fd
|
96cb01cdbef51a9da25e1de68c7318572b69510f
|
/test/test_coupling.py
|
2e2904e528f87e46a22a7a510cc250e258921084
|
[] |
no_license
|
pobot-pybot/pybot-youpi2
|
e269efffb98083fc51b6d947dc8278bf644d4092
|
a93a9acf40814583ba6816d265cc18a1bb61a72f
|
refs/heads/master
| 2021-01-18T19:45:55.018721
| 2016-10-13T21:24:30
| 2016-10-13T21:24:30
| 69,095,735
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,639
|
py
|
import unittest
from pybot.youpi2.model import YoupiArm
class JointToMotorTestCase(unittest.TestCase):
def test_01(self):
angles = {
YoupiArm.MOTOR_BASE: 10
}
angles_orig = angles.copy()
YoupiArm.joint_to_motor(angles)
self.assertDictEqual(angles, angles_orig)
def test_02(self):
angles = {
YoupiArm.MOTOR_BASE: 0,
YoupiArm.MOTOR_SHOULDER: 10,
YoupiArm.MOTOR_ELBOW: 0,
YoupiArm.MOTOR_WRIST: 0,
YoupiArm.MOTOR_HAND_ROT: 0,
}
YoupiArm.joint_to_motor(angles)
self.assertDictEqual(angles, {
YoupiArm.MOTOR_BASE: 0,
YoupiArm.MOTOR_SHOULDER: 10,
YoupiArm.MOTOR_ELBOW: 10,
YoupiArm.MOTOR_WRIST: 10,
YoupiArm.MOTOR_HAND_ROT: -10,
})
class MotorToJointTestCase(unittest.TestCase):
def test_01(self):
angles = {
YoupiArm.MOTOR_BASE: 10
}
angles_orig = angles.copy()
YoupiArm.motor_to_joint(angles)
self.assertDictEqual(angles, angles_orig)
def test_02(self):
angles = {
YoupiArm.MOTOR_SHOULDER: 10,
YoupiArm.MOTOR_ELBOW: 10,
YoupiArm.MOTOR_WRIST: 10,
YoupiArm.MOTOR_HAND_ROT: -10,
}
YoupiArm.motor_to_joint(angles)
self.assertDictEqual(angles, {
YoupiArm.MOTOR_SHOULDER: 10,
YoupiArm.MOTOR_ELBOW: 0,
YoupiArm.MOTOR_WRIST: 0,
YoupiArm.MOTOR_HAND_ROT: 0,
})
def test_03(self):
angles = {
YoupiArm.MOTOR_SHOULDER: 10,
YoupiArm.MOTOR_ELBOW: 20,
YoupiArm.MOTOR_WRIST: 30,
YoupiArm.MOTOR_HAND_ROT: -50,
}
YoupiArm.motor_to_joint(angles)
self.assertDictEqual(angles, {
YoupiArm.MOTOR_SHOULDER: 10,
YoupiArm.MOTOR_ELBOW: 10,
YoupiArm.MOTOR_WRIST: 10,
YoupiArm.MOTOR_HAND_ROT: -20,
})
class GlobalToLocalTestCase(unittest.TestCase):
def test_01(self):
_global = [10, 0, 0, 0, 0, 0]
_local = YoupiArm.global_to_local(_global)
self.assertEqual(_local, _global)
def test_02(self):
_global = [0, 10, 10, 10, -10, 0]
_local = YoupiArm.global_to_local(_global)
self.assertEqual(_local, [0, 10, 0, 0, 0, 0])
def test_03(self):
_global = [0, 10, 20, 30, -50, 0]
_local = YoupiArm.global_to_local(_global)
self.assertEqual(_local, [0, 10, 10, 10, -20, 0])
if __name__ == '__main__':
unittest.main()
|
[
"eric@pobot.org"
] |
eric@pobot.org
|
7fd91a257c9d2ed17a8aef21c95ccf7cf487178c
|
62b2a1a9cea1662a86fa410fe91448ad6805d7b5
|
/variability/plot_agn_tau_distribution.py
|
5fd1d86f1494be77ada773b1f7a2dbe325fecb92
|
[] |
no_license
|
danielsf/CatSimMaintenance
|
f15dd74486f48c740bce2f4e3b6fdb60ab5d8c6f
|
6f17f96b189aa0f860d316ffbe58483926123f4c
|
refs/heads/master
| 2018-07-18T22:12:23.005153
| 2018-06-01T21:47:39
| 2018-06-01T21:47:39
| 105,593,369
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,578
|
py
|
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
def make_histogram(xx_in, dmag, cut_off, min_val = None, cumulative=True):
xx = xx_in[np.where(xx_in<=cut_off+dmag)]
#print xx.min(),xx.max()
if min_val is None:
min_val=xx.min()-dmag
i_xx = np.round((xx-min_val)/dmag).astype(int)
unique_ixx, ct = np.unique(i_xx, return_counts=True)
if cumulative:
return unique_ixx*dmag+min_val, ct.astype(float)/float(len(xx_in))
else:
return unique_ixx*dmag+min_val, ct.astype(int)
if __name__ == "__main__":
dtype = np.dtype([('z', float), ('tau', float)])
data = np.genfromtxt('agn_tau_distribution.txt', dtype=dtype)
plt.figsize=(30,30)
tau_renorm = np.log10(data['tau']/(1.0+data['z']))
tau = np.log10(data['tau'])
tau_min = tau.min()
tau_max = tau.max()
tau_renorm_min = tau_renorm.min()
tau_renorm_max = tau_renorm.max()
tau_min = min(tau_min, tau_renorm_min)
tau_max = max(tau_max, tau_renorm_max)
dtau = 0.1
tau_grid, tau_hist = make_histogram(tau, dtau, tau_max+dtau,
cumulative=False)
(tau_renorm_grid,
tau_renorm_hist) = make_histogram(tau_renorm, dtau, tau_max+dtau,
cumulative=False)
t_l, = plt.plot(tau_grid, tau_hist)
t_r_l, = plt.plot(tau_renorm_grid, tau_renorm_hist)
plt.legend([t_l, t_r_l],['$\\tau$', '$\\tau/(1+z)$'], loc=0)
plt.xlim(0, 5)
plt.xlabel('$\log(\\tau)$')
plt.savefig('agn_tau_dist_fig.png')
|
[
"scott.f.daniel@gmail.com"
] |
scott.f.daniel@gmail.com
|
554de90b84acb3be0a4506092e5f1e2af577bec8
|
c13261f07803218ff29238b3a455650316506e05
|
/light8/configconvert.py
|
d5b0f0a32452f3cf3ccb555ca4ae4e08d33d842f
|
[] |
no_license
|
shakaran/light9
|
3456427f718f43b829d34794bafc22d74305d30a
|
91c86b030475e65f92c90adb0a0920f1fb9996eb
|
refs/heads/master
| 2021-01-20T23:37:04.881942
| 2012-06-13T05:35:04
| 2012-06-13T05:35:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,196
|
py
|
from Config import subs
import Patch
Patch.reload_data(0)
def print_tsv(filename,allchans,subs):
f=open(filename,"w")
print >>f,"\t"+"\t".join(allchans)
for name,levels in subs.items():
normd={}
# nrmalize the names in the sub
for k,v in levels.items():
normd[Patch.resolve_name(k)]=v
print >>f,"%s\t%s" % (name, "\t".join([str(normd.get(c,"")) for c in allchans]))
def read_tsv(filename,outname):
"""converts from tsv filename to a config file (python) named outname"""
f=open(filename,'r')
out=open(outname,'w')
allchans=f.readline().split("\t")[1:]
for line in f.xreadlines():
spl=line.split("\t")
subname=spl[0]
print >>out,"subs['%s']={" % subname,
for channame,level in zip(allchans,spl[1:]):
try:
if level!="" and int(level)>0:
print >>out,"'%s': %s," %(channame,level),
except ValueError:
pass
print >>out,"}\n"
#print_tsv(filename="sublevs.txt",allchans=Patch.get_all_channels(),subs=subs)
read_tsv(filename="sublevs-fixed",outname="Configsubs-fixed.py")
|
[
"none"
] |
none
|
194d96f6626df79c9dd21202fcc29fe2b79e3d3b
|
f66e6a3bc5f6eae570afa2013325d462f530cff6
|
/core/seller/migrations/0054_auto_20210514_1705.py
|
d8258c0702eeeea09cfae4fd507edfce45254a9f
|
[] |
no_license
|
Mahe07/vyavaharback
|
3cb30e227d9e0c25c86ba4e20f9cafce054c4a2a
|
4e35cac3b643197a78e420d34ea3f45cce368e46
|
refs/heads/main
| 2023-08-10T17:21:56.538518
| 2021-09-17T03:53:44
| 2021-09-17T03:53:44
| 407,386,489
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# Generated by Django 3.1.4 on 2021-05-14 11:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('seller', '0053_auto_20210503_1805'),
]
operations = [
migrations.AlterField(
model_name='seller',
name='seller_status',
field=models.CharField(choices=[('Register', 'Register'), ('Approved', 'Approved'), ('Verified', 'Verified'), ('Draft', 'Draft'), ('Drop', 'Drop')], default='Register', max_length=100, null=True),
),
]
|
[
"noreply@github.com"
] |
Mahe07.noreply@github.com
|
128cc0d6f08d2665af3af8ff3948a1e1ab1f15ef
|
897802abf4ee5c7267de3eb5e321cc931898e2f6
|
/python/python/eric/part2_project/project01_game_aliens/bullet.py
|
94d931467ed36bd4cf2e943964aa9f74215ee800
|
[] |
no_license
|
aojie654/codes_store
|
0527c7a7729b472e8fd2fd67af462cf857970633
|
ed71b6266b2d2b5ddefadcb958f17695fb9db6cf
|
refs/heads/master
| 2021-07-15T17:04:33.591673
| 2021-07-03T14:42:30
| 2021-07-03T14:42:30
| 132,343,733
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,138
|
py
|
# coding=utf-8
# @File : bullet
# @Author: aojie654
# @Date : 18-6-10 下午4:15
# @Desc : Bullet
import pygame as pg
from pygame.sprite import Sprite
class Bullet(Sprite):
"""A class to manage bullet"""
def __init__(self, ai_settings, screen, ship):
"""Create a bullet object at position where the ship is"""
super(Bullet, self).__init__()
self.screen = screen
# Set a rectangle at position (0,0), then set the correct
self.rect = pg.Rect(0, 0, ai_settings.bullet_width, ai_settings.bullet_height)
self.rect.centerx = ship.rect.centerx
self.rect.top = ship.rect.top
# Store bullet position with decimal
self.y = float(self.rect.y)
# Set color and speed
self.color = ai_settings.bullet_color
self.speed_factor = ai_settings.bullet_speed_factor
def update(self):
"""Move up"""
# update position value
self.y -= self.speed_factor
# update rect position
self.rect.y = self.y
def draw_bullet(self):
"""Draw bullet"""
pg.draw.rect(self.screen, self.color, self.rect)
|
[
"aojie654@live.cn"
] |
aojie654@live.cn
|
15705a89c31afbb086c3f166ae551352f6725885
|
34652a47355a8dbe9200db229a1bbc62619de364
|
/Algorithms/Recursion/tst01.py
|
c5a4b031f4ff2b599d55cd84ccb09657de44c1a8
|
[] |
no_license
|
btrif/Python_dev_repo
|
df34ab7066eab662a5c11467d390e067ab5bf0f8
|
b4c81010a1476721cabc2621b17d92fead9314b4
|
refs/heads/master
| 2020-04-02T13:34:11.655162
| 2019-11-10T11:08:23
| 2019-11-10T11:08:23
| 154,487,015
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 926
|
py
|
# Created by Bogdan Trif on 30-06-2018 , 3:16 PM.
import turtle
myTurtle = turtle.Turtle()
myWin = turtle.Screen()
myTurtle.speed(10)
def drawTriangle( points , color , myTurtle ):
myTurtle.fillcolor(color)
myTurtle.up()
myTurtle.goto(points[0][0],points[0][1]) # ( -100, -50 )
myTurtle.down()
myTurtle.begin_fill()
myTurtle.goto(points[1][0],points[1][1]) # (0 , 100)
myTurtle.goto(points[2][0],points[2][1]) # [100,-50]
myTurtle.goto(points[0][0],points[0][1]) # ( -100, -50 )
myTurtle.end_fill()
def triangle(line_len):
for i in range(3) :
myTurtle.forward(line_len)
myTurtle.left(120)
myTurtle.forward(line_len//2)
myTurtle.left( 60 )
side_len = 400
myPoints = [ [-side_len , -side_len//2 ] , [ 0, side_len ],[ side_len,-side_len//2 ] ]
# triangle(300)
drawTriangle(myPoints, 'olive', myTurtle)
myWin.exitonclick()
|
[
"bogdan.evanzo@gmail.com"
] |
bogdan.evanzo@gmail.com
|
81707d27a4d72edd3e5bcd7db29b753d65389996
|
36bc2f2b52201ccc7ca11035fd0c66a8fe64d3f5
|
/lint_domain_packages/interfaces.py
|
1467db18f4b330fd2f46f364a420ddc7e4cea1d9
|
[
"MIT"
] |
permissive
|
imankulov/lint-domain-packages
|
fc46ba36ca739e31e152be79d3f609abd0af074a
|
cc6b7a33bdb0a5e022feee9d22d7f93c9f069680
|
refs/heads/main
| 2023-05-14T18:42:11.530567
| 2021-05-27T08:55:01
| 2021-05-27T08:55:55
| 370,928,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,796
|
py
|
import abc
from dataclasses import dataclass
from grimp.application.ports.graph import AbstractImportGraph
@dataclass
class LinterSettings:
# Root package that we analyzes
root: str
# List of public domain packages.
#
# All modules of public domain packages are open to import. Usually, those are
# so-called, "utility packages", an assorted list of helper classes and functions.
#
# Note that marking a domain package as public doesn't automatically add it to
# the list of dependencies.
public_packages: list[str]
# List of public modules.
#
# Usually contains things like services and interfaces, but doesn't contain
# things that are specific to internal implementation of the package.
#
# Applies to all domain packages.
#
# Note that in order to be able to import these modules from the
# outside, you need to add the enclosing package in dependencies
public_modules: list[str]
# A self-imposed dependency map.
#
# Contains mapping from dependent modules to depending ones.
# For example, dependencies={"payments": ["users", "projects"]} means that
# the domain package "payments" depends on (imports) packages "users" and
# "projects"
dependencies: dict[str, list[str]]
def is_public(self, module_import_path: str) -> bool:
"""
Return true if module is public.
The module is considered public, if it belongs to a public domain package
(like, "myproject.utils") or the top-level module is public itself.
(like, "myproject.foo.services").
"""
chunks = self._get_module_chunks(module_import_path)
root, package = chunks[:2]
if len(chunks) > 2:
toplevel_module = chunks[2]
else:
toplevel_module = None # doesn't exist
if package in self.public_packages:
return True
if toplevel_module and toplevel_module in self.public_modules:
return True
return False
def listed_in_dependencies(
self, module_import_path: str, imported_module_import_path: str
) -> bool:
"""
Return True if the package of `imported_module_import_path` is marked as
a dependency of the package of `module_import_path`.
"""
package_name = self._get_module_chunks(module_import_path)[1]
imported_package_name = self._get_module_chunks(imported_module_import_path)[1]
if package_name not in self.dependencies:
return False
return imported_package_name in self.dependencies[package_name]
def _get_module_chunks(self, module_import_path):
chunks = module_import_path.split(".")
if chunks[0] != self.root:
raise RuntimeError(f"{module_import_path} doesn't belong to {self.root}")
return chunks
@dataclass
class ImportDetails:
line_number: int
line_contents: str
@dataclass
class ImportViolationGroup:
group_key: str
error_message: str
violations: list["ImportViolation"]
@dataclass
class ImportViolation:
"""Generic class for an import violation."""
graph: AbstractImportGraph
importer: str
imported: str
def get_import_details(self) -> ImportDetails:
details = self.graph.get_import_details(
importer=self.importer, imported=self.imported
)[0]
return ImportDetails(details["line_number"], details["line_contents"])
def get_location(self) -> str:
details = self.get_import_details()
return (
f"{self.importer_filename}:{details.line_number} "
f"{details.line_contents}"
)
@property
def importer_filename(self) -> str:
return self.importer.replace(".", "/") + ".py"
@property
def imported_filename(self) -> str:
return self.imported.replace(".", "/") + ".py"
def error_message(self) -> str:
raise NotImplementedError("Must be implemented in subclasses.")
def group_key(self) -> str:
raise NotImplementedError("Must be implemented in subclasses.")
@dataclass
class NonPublicImportViolation(ImportViolation):
def error_message(self) -> str:
return "A module imported outside of the package is not public."
def group_key(self) -> str:
return self.imported
@dataclass
class NotDependentImportViolation(ImportViolation):
def error_message(self) -> str:
return (
f"Package {domain_package(self.importer)} implicitly depends on "
f"{domain_package(self.imported)}."
)
def group_key(self) -> str:
return f"{domain_package(self.importer)}:{domain_package(self.imported)}"
def domain_package(import_path: str):
return import_path.split(".")[1]
|
[
"roman.imankulov@gmail.com"
] |
roman.imankulov@gmail.com
|
7d9ceb951227980f54371141d23133314a006bc8
|
8a25ada37271acd5ea96d4a4e4e57f81bec221ac
|
/home/pi/GrovePi/Software/Python/others/temboo/Library/Zoho/CRM/GenerateAuthToken.py
|
6564e3744ec9a54d1fc5d48c38797f2b50e0244d
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
lupyuen/RaspberryPiImage
|
65cebead6a480c772ed7f0c4d0d4e08572860f08
|
664e8a74b4628d710feab5582ef59b344b9ffddd
|
refs/heads/master
| 2021-01-20T02:12:27.897902
| 2016-11-17T17:32:30
| 2016-11-17T17:32:30
| 42,438,362
| 7
| 8
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,234
|
py
|
# -*- coding: utf-8 -*-
###############################################################################
#
# GenerateAuthToken
# Generates an authentication token.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GenerateAuthToken(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GenerateAuthToken Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GenerateAuthToken, self).__init__(temboo_session, '/Library/Zoho/CRM/GenerateAuthToken')
def new_input_set(self):
return GenerateAuthTokenInputSet()
def _make_result_set(self, result, path):
return GenerateAuthTokenResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GenerateAuthTokenChoreographyExecution(session, exec_id, path)
class GenerateAuthTokenInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GenerateAuthToken
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, string) Your Zoho password.)
"""
super(GenerateAuthTokenInputSet, self)._set_input('Password', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) Your Zoho CRM username.)
"""
super(GenerateAuthTokenInputSet, self)._set_input('Username', value)
class GenerateAuthTokenResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GenerateAuthToken Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_AuthenticationToken(self):
"""
Retrieve the value for the "AuthenticationToken" output from this Choreo execution. ((string) The authentication token returned from Zoho.)
"""
return self._output.get('AuthenticationToken', None)
class GenerateAuthTokenChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GenerateAuthTokenResultSet(response, path)
|
[
"lupyuen@gmail.com"
] |
lupyuen@gmail.com
|
abb34c25cbdcc7ced69a926540585e6977ff820e
|
b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf
|
/000880DataCampPython01Intro/dataCamp000880ch01p02ex01.py
|
effc6ed2c8aeb882398c095f23b7b7584daaeca4
|
[
"Apache-2.0"
] |
permissive
|
SafonovMikhail/python_000577
|
5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4
|
f2dccac82a37df430c4eb7425b5d084d83520409
|
refs/heads/master
| 2022-12-08T10:53:57.202746
| 2022-12-07T09:09:51
| 2022-12-07T09:09:51
| 204,713,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
'''
# Example, do not modify!
print(5 / 8)
# Print the sum of 7 and 10
'''
# Example, do not modify!
print(5 / 8)
# Print the sum of 7 and 10
print(7 + 10)
|
[
"ms33@inbox.ru"
] |
ms33@inbox.ru
|
47ce9e9f24be7f2de7cb55d7edf10d2ce08b6d6f
|
7a77bade054683f7c36c59c6e0640958960efeea
|
/Komodo-Edit-8/lib/mozilla/python/komodo/Crypto/SelfTest/Random/Fortuna/__init__.py
|
e7e3adef44e6dd6018da853dc806e39887326fd2
|
[] |
no_license
|
amaurial/mininet
|
4e8fd62ec1f0547d21fcbb60a3fde64d8855920b
|
d2761c075130c0f447a69bbb40b0e3fddc052eb6
|
refs/heads/master
| 2016-09-06T12:03:47.808851
| 2013-07-16T10:55:01
| 2013-07-16T10:55:01
| 11,447,348
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,763
|
py
|
# -*- coding: utf-8 -*-
#
# SelfTest/Random/Fortuna/__init__.py: Self-test for Fortuna modules
#
# Written in 2008 by Dwayne C. Litzenberger <dlitz@dlitz.net>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test for the Crypto.Random.Fortuna package"""
__revision__ = "$Id: __init__.py 4769 2010-04-09 17:53:50Z toddw $"
import os
def get_tests(config={}):
tests = []
import test_FortunaAccumulator; tests += test_FortunaAccumulator.get_tests(config=config)
import test_FortunaGenerator; tests += test_FortunaGenerator.get_tests(config=config)
import test_SHAd256; tests += test_SHAd256.get_tests(config=config)
return tests
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
|
[
"amaurial@uol.com.br"
] |
amaurial@uol.com.br
|
b320db840727aa5ecd16ba22b569f203a7c222e4
|
4201bc1c12ef6edaaf8a201ef3a1cc279889b768
|
/tests/recogners/visual/test_plot.py
|
cc6dd289324cc59a8f6299c1de6d953fb7899611
|
[
"MIT"
] |
permissive
|
BachiLi/recogners
|
30358df0d9b866ef8c298ff804689709a9e16638
|
945eb6119182d3b3f2d77c189b5b1c4f5306a9e3
|
refs/heads/master
| 2020-09-29T01:29:37.914280
| 2019-09-19T18:40:21
| 2019-09-19T18:40:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
import pytest
import torch
from recogners.visual import plot
def test_show_tensor():
t = torch.zeros(28, 28)
plot.show_tensor(t)
|
[
"gth.rosa@uol.com.br"
] |
gth.rosa@uol.com.br
|
e1a924aec30e81ab56964e1ad7f9bb4247ddb7ab
|
4522fc52bc43654aadd30421a75bae00a09044f0
|
/share/haley/validations.py
|
44287c17e5198ac05fa0bbd79a2cdfc4130a1a00
|
[] |
no_license
|
qesoalpe/anelys
|
1edb8201aa80fedf0316db973da3a58b67070fca
|
cfccaa1bf5175827794da451a9408a26cd97599d
|
refs/heads/master
| 2020-04-07T22:39:35.344954
| 2018-11-25T05:23:21
| 2018-11-25T05:23:21
| 158,779,332
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,789
|
py
|
from neo4j.v1 import GraphDatabase, basic_auth
from pprint import pprint
from decimal import Decimal
d8757_5 = GraphDatabase.driver('bolt://comercialpicazo.com', auth=basic_auth('alejandro', '47exI4'))
d5_session = d8757_5.session()
cfdis = list()
tps = list()
txs = list()
def get_txs(_month):
rr = d5_session.run('match (tx:bailey_transaction)-[:check]->() where tx.date starts with {month} '
'return tx.id as id, tx.date as date, tx.description as description, tx.value as value, '
'tx.mov_num as mov_num;',
{'month': _month})
_txs = list()
for rc in rr:
_txs.append({'id': rc['id'], 'date': rc['date'], 'description': rc['description'], 'value': rc['value'],
'mov_num': rc['mov_num']})
return _txs
def get_tx(tx):
for _tx in txs:
if _tx['id'] == tx['id']:
return _tx
else:
txs.append(tx)
return tx
def ensure_cfdi(cfdi):
for _cfdi in cfdis:
if _cfdi['uuid'] == cfdi['uuid']:
return _cfdi
else:
cfdis.append(cfdi)
return cfdi
def get_taxpayer(rfc):
for tp in tps:
if tp['rfc'] == rfc:
return tp
else:
tp = {'rfc': rfc}
tps.append(tp)
return tp
def inflate_cfdis(_txs):
for _tx in _txs:
rr = d5_session.run('match ({id:{id}})-[:cfdi]->(cfdi:haley_cfdi)-[:emitter]->(emitter) return cfdi.uuid as uuid, '
'cfdi.total as total, cfdi.datetime as datetime, cfdi.folio as folio, cfdi.voucher_effect as voucher_effect, '
'emitter.rfc as emitter_rfc;',
{'id': _tx['id']})
cfdis = list()
for rc in rr:
cfdi = {'uuid': rc['uuid'], 'datetime': rc['datetime'], 'voucher_effect': rc['voucher_effect'], 'total': rc['total']}
if rc['folio'] is not None:
cfdi['folio'] = rc['folio']
cfdi = ensure_cfdi(cfdi)
if 'emitter' not in cfdi and rc['emitter_rfc'] is not None:
cfdi['emitter'] = get_taxpayer(rc['emitter_rfc'])
cfdis.append(cfdi)
if len(cfdis) == 1:
_tx['cfdi'] = cfdis[0]
elif len(cfdis) > 1:
_tx['cfdis'] = cfdis
def inflate_txs(cfdi):
rr = d5_session.run('match (cfdi:haley_cfdi{uuid:{uuid}})<-[:cfdi]-(tx:bailey_transaction) return tx.id as id, tx.value as value, tx.date as date, tx.description as description, tx.mov_num as mov_num;',
{'uuid': cfdi['uuid']})
txs = list()
for rc in rr:
tx = get_tx({'id': rc['id'], 'value': rc['value'], 'description': rc['description'], 'mov_num': rc['mov_num']})
txs.append(tx)
if len(txs) == 1:
cfdi['tx'] = txs[0]
elif len(txs) > 1:
cfdi['txs'] = txs
def validation_1():
# (cfdi)<-[:cfdi]-(tx) count 1
no_pass = list()
for cfdi in cfdis:
if 'txs' in cfdi and len(cfdi['txs']) > 1:
no_pass.append(cfdi)
return no_pass
def validation_2():
# (tx)-[:cfdi]->(cfdi)-[:emitter]->(emitter) emitter unique
no_pass = list()
for tx in txs:
emitter = None
if 'cfdis' in tx:
for cfdi in tx['cfdis']:
if 'emitter' in cfdi:
if emitter is not None:
if cfdi['emitter']['rfc'] != emitter['rfc']:
no_pass.append(tx)
break
else:
emitter = cfdi['emitter']
return no_pass
def validation_3():
# tx.value == sum(cfdi.total)
no_pass = list()
# {'tx': tx, 'diference': diference}
from sarah.acp_bson import dictutils
dictutils.list_float_to_dec(txs)
for tx in txs:
total_cfdis = Decimal()
if 'cfdi' in tx:
if tx['cfdi']['voucher_effect'] == 'ingress':
total_cfdis = tx['cfdi']['total']
elif tx['cfdi']['voucher_effect'] == 'egress':
total_cfdis = -tx['cfdi']['total']
elif 'cfdis' in tx:
for cfdi in tx['cfdis']:
if cfdi['voucher_effect'] == 'ingress':
total_cfdis += cfdi['total']
elif cfdi['voucher_effect'] == 'egress':
total_cfdis -= cfdi['total']
if total_cfdis != -tx['value']:
no_pass.append({'tx': tx, 'difference': -tx['value'] - total_cfdis})
return no_pass
def validation_4():
# (tx)-[:cfdi]->(cfdi)-[:emitter]->(emitter)<-[:beneficiary]-(check)<-[:check]-(tx)
no_pass = list()
return no_pass
def validate():
validation = dict()
no_pass = validation_1()
if len(no_pass) > 0:
validation['validation_1'] = {'no_pass': no_pass}
no_pass = validation_2()
if len(no_pass) > 0:
validation['validation_2'] = {'no_pass': no_pass}
no_pass = validation_3()
if len(no_pass)>0:
validation['validation_3'] = {'no_pass': no_pass}
no_pass = validation_4()
if len(no_pass) > 0:
validation['validation_4'] = {'no_pass': no_pass}
return validation
def populate_from_txs_in_month(month):
txs.clear()
tps.clear()
cfdis.clear()
txs.extend(get_txs(month))
inflate_cfdis(txs)
for cfdi in cfdis:
inflate_txs(cfdi)
for tp in tps:
rr = d5_session.run('match (tp{rfc:{rfc}}) return tp.name as name limit 1;', {'rfc': tp['rfc']})
rc = rr.single()
if rc is not None and rc['name'] is not None:
tp['name'] = rc['name']
if __name__ == '__main__':
pprint(txs)
pprint(tps)
pprint(cfdis)
|
[
"qesoalpe@gmail.com"
] |
qesoalpe@gmail.com
|
64de11311bc599c3c5bed8ace84665103a3e169d
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/13164070.py
|
860a9755b3d757aebdb485b23f443e9c6f4283dc
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/13164070.py generated: Wed, 25 Jan 2017 15:25:33
#
# Event Type: 13164070
#
# ASCII decay Descriptor: {[[B_s0]nos => K+ K- (D~0 -> K+ pi-)]cc, [[B_s0]os => K- K+ (D0 -> K- pi+)]cc}
#
from Configurables import Generation
Generation().EventType = 13164070
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_D0KK,Kpi=PHSP.dec"
Generation().SignalRepeatedHadronization.CutTool = "LHCbAcceptance"
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "LHCbAcceptance"
from Configurables import LHCbAcceptance
pgun.addTool( LHCbAcceptance )
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13164070
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
ad9fadfe929fbd742d5d3194a338c36c0e766e06
|
1498148e5d0af365cd7fd16197174174a7fa9800
|
/leetcode/t001629_2.py
|
fbcfa1dde1361cd3e1d9c422cfd37ca1ee62acc6
|
[] |
no_license
|
feiyanshiren/myAcm
|
59a2b80fe7e02787defcb152eee3eae26135322a
|
00c7082d5143ddf87aeeafbdb6ce29da46dc8a12
|
refs/heads/master
| 2023-09-01T12:12:19.866447
| 2023-09-01T09:09:56
| 2023-09-01T09:09:56
| 148,560,672
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 685
|
py
|
from typing import List
class Solution:
    """LeetCode 1629: find the key that was pressed for the longest time."""

    def slowestKey(self, releaseTimes: List[int], keysPressed: str) -> str:
        """Return the lexicographically largest key among the longest presses.

        The i-th press lasts ``releaseTimes[i] - releaseTimes[i - 1]``, with
        an implicit release time of 0 before the first press.

        Fixes over the original version:
        * no longer mutates the caller's ``releaseTimes`` list (the original
          did ``releaseTimes.insert(0, 0)`` in place);
        * the leftover debug ``print(max_v)`` has been removed.
        """
        best_key = ""
        best_duration = 0
        prev_release = 0
        for release, key in zip(releaseTimes, keysPressed):
            duration = release - prev_release
            # Prefer a strictly longer press; on a tie keep the larger key.
            if duration > best_duration or (
                    duration == best_duration and key > best_key):
                best_duration = duration
                best_key = key
            prev_release = release
        return best_key
s = Solution()
print(s.slowestKey([9, 29, 49, 50], "cbcd"))
print(s.slowestKey([12, 23, 36, 46, 62], "spuda"))
|
[
"feiyanshiren@163.com"
] |
feiyanshiren@163.com
|
c75429e425489c4fb78a4bfc6a4c93b281d9a415
|
0062ceae0071aaa3e4e8ecd9025e8cc9443bcb3b
|
/solved/17070.py
|
e14c99fcb45eac54e24f1a06821ec0c612691411
|
[] |
no_license
|
developyoun/AlgorithmSolve
|
8c7479082528f67be9de33f0a337ac6cc3bfc093
|
5926924c7c44ffab2eb8fd43290dc6aa029f818d
|
refs/heads/master
| 2023-03-28T12:02:37.260233
| 2021-03-24T05:05:48
| 2021-03-24T05:05:48
| 323,359,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 566
|
py
|
# BOJ 17070 (per the file path): count the ways to move a 1x2 pipe from the
# top-left corner to cell (N-1, N-1) of an N x N grid, where board[i][j] == 1
# marks a blocked cell.
# dp[i][j][d] = number of ways the pipe can end at (i, j) with orientation d:
#   d == 0: horizontal, d == 1: vertical, d == 2: diagonal.
N = int(input())
board = [list(map(int, input().split())) for _ in range(N)]
dp = [[[0, 0, 0] for _ in range(N)] for _ in range(N)]
# The pipe starts lying horizontally across (0, 0)-(0, 1).
dp[0][1][0] = 1
# Seed the rest of the first row: only horizontal extension is possible
# there, and a wall stops it for good.
for i in range(2, N):
    if board[0][i]: break
    dp[0][i][0] = 1
for i in range(1, N):
    for j in range(1, N):
        if not board[i][j]:
            # Horizontal end: extend a horizontal or diagonal pipe from the left.
            dp[i][j][0] = dp[i][j-1][0] + dp[i][j-1][2]
            # Vertical end: extend a vertical or diagonal pipe from above.
            dp[i][j][1] = dp[i-1][j][1] + dp[i-1][j][2]
            # Diagonal end additionally needs the two cells it sweeps
            # ((i-1, j) and (i, j-1)) to be empty.
            if not (board[i-1][j] or board[i][j-1]):
                dp[i][j][2] = dp[i-1][j-1][0] + dp[i-1][j-1][1] + dp[i-1][j-1][2]
# Any orientation at the target cell counts.
print(sum(dp[N-1][N-1]))
|
[
"pyoun820@naver.com"
] |
pyoun820@naver.com
|
8b4f63c6a55804ce7d84505027839b601afd61d2
|
50948d4cb10dcb1cc9bc0355918478fb2841322a
|
/azure-mgmt-devtestlabs/azure/mgmt/devtestlabs/models/formula_fragment_py3.py
|
0f212fb85237d011fc8a711d59206b6152c43a00
|
[
"MIT"
] |
permissive
|
xiafu-msft/azure-sdk-for-python
|
de9cd680b39962702b629a8e94726bb4ab261594
|
4d9560cfd519ee60667f3cc2f5295a58c18625db
|
refs/heads/master
| 2023-08-12T20:36:24.284497
| 2019-05-22T00:55:16
| 2019-05-22T00:55:16
| 187,986,993
| 1
| 0
|
MIT
| 2020-10-02T01:17:02
| 2019-05-22T07:33:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,131
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .update_resource_py3 import UpdateResource
class FormulaFragment(UpdateResource):
    """A formula for creating a VM, specifying an image base and other parameters.

    NOTE: this model is generated by AutoRest (see the file header); manual
    edits will be lost on regeneration -- prefer changing the generator input.

    :param tags: The tags of the resource.
    :type tags: dict[str, str]
    :param description: The description of the formula.
    :type description: str
    :param author: The author of the formula.
    :type author: str
    :param os_type: The OS type of the formula.
    :type os_type: str
    :param formula_content: The content of the formula.
    :type formula_content:
     ~azure.mgmt.devtestlabs.models.LabVirtualMachineCreationParameterFragment
    :param vm: Information about a VM from which a formula is to be created.
    :type vm: ~azure.mgmt.devtestlabs.models.FormulaPropertiesFromVmFragment
    """

    # Maps each Python attribute to its wire-format key and type for the
    # SDK's (de)serialization layer; 'properties.x' keys are nested fields
    # in the REST payload.
    _attribute_map = {
        'tags': {'key': 'tags', 'type': '{str}'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'author': {'key': 'properties.author', 'type': 'str'},
        'os_type': {'key': 'properties.osType', 'type': 'str'},
        'formula_content': {'key': 'properties.formulaContent', 'type': 'LabVirtualMachineCreationParameterFragment'},
        'vm': {'key': 'properties.vm', 'type': 'FormulaPropertiesFromVmFragment'},
    }

    def __init__(self, *, tags=None, description: str=None, author: str=None, os_type: str=None, formula_content=None, vm=None, **kwargs) -> None:
        # 'tags' is handled by the UpdateResource base; the rest are plain
        # attribute assignments consumed via _attribute_map above.
        super(FormulaFragment, self).__init__(tags=tags, **kwargs)
        self.description = description
        self.author = author
        self.os_type = os_type
        self.formula_content = formula_content
        self.vm = vm
|
[
"lmazuel@microsoft.com"
] |
lmazuel@microsoft.com
|
f12dbc29ef6f39cbd86252c9fb94858a00f35d61
|
377e3a552fb807febc18ce036af77edbce93ca19
|
/binary trees/deepest_node_binary_tree.py
|
891638b927afd3d373b32ddde50d44b356bce55f
|
[] |
no_license
|
souravs17031999/100dayscodingchallenge
|
940eb9b6d6037be4fc0dd5605f9f808614085bd9
|
d05966f3e6875a5ec5a8870b9d2627be570d18d9
|
refs/heads/master
| 2022-10-29T11:05:46.762554
| 2022-09-28T13:04:32
| 2022-09-28T13:04:32
| 215,993,823
| 44
| 12
| null | 2022-08-18T14:58:50
| 2019-10-18T09:55:03
|
Python
|
UTF-8
|
Python
| false
| false
| 1,656
|
py
|
# Program for finding the deepest node in the tree.
# Input : Root of below tree
# 1
# / \
# 2 3
# / \ / \
# 4 5 6 7
# \
# 8
# Output : 8
#
# Input : Root of below tree
# 1
# / \
# 2 3
# /
# 6
# Output : 6
# ----------------------------------------------------------------------------------------------------
# As we know that height of the binary tree is the maximum depth of the binary tree.
# From calculating height, we know that max. depth which is the level of the deepest node
# and then we can print the node data at the level as found above.
# TIME : 0(N), N IS NODES OF BINARY TREE.
class new_Node:
    """A single binary-tree node: a payload plus two (initially absent) children."""

    def __init__(self, x):
        # Store the payload; the left/right links are filled in by the caller.
        self.data = x
        self.left = None
        self.right = None
def height(root):
    """Return the number of levels of the tree rooted at *root* (0 if empty)."""
    if root is None:
        return 0
    # One level for this node plus the taller of the two subtrees.
    return 1 + max(height(root.left), height(root.right))
def deepest_node(root, levels):
    """Print the data of every node exactly *levels* levels below *root*.

    Called with ``levels = height(root)``, this prints all deepest nodes.
    """
    if root is None or levels < 1:
        return
    if levels == 1:
        print(root.data)
        return
    # Descend one level on both sides.
    deepest_node(root.left, levels - 1)
    deepest_node(root.right, levels - 1)
if __name__ == '__main__':
    # Build a sample tree and print every node on its deepest level.
    # NOTE(review): this tree does not match either diagram in the header
    # comment; its deepest node is 9 (level 5), not 8 -- confirm the
    # intended example.
    root = new_Node(1)
    root.left = new_Node(2)
    root.right = new_Node(3)
    root.left.left = new_Node(4)
    root.right.left = new_Node(5)
    root.right.right = new_Node(6)
    root.right.left.right = new_Node(7)
    root.right.right.right = new_Node(8)
    root.right.left.right.left = new_Node(9)
    levels = height(root)
    deepest_node(root, levels)
|
[
"souravs_1999@rediffmail.com"
] |
souravs_1999@rediffmail.com
|
42ff61b7b1532947d2f6707b192005c61325f2de
|
644b019a4792b6c7d9e5352e6330069850cc07e7
|
/dentexchange/apps/libs/tests/test_login_required_for.py
|
88b4f1aa0075a77293dc5f58dee1d6c6a608927f
|
[
"BSD-3-Clause"
] |
permissive
|
jpchauvel/dentexchange
|
db0611c8c45365db30bdc15e3005c6eeac104c73
|
58ae303e842404fc9e1860f294ec8044a332bef3
|
refs/heads/master
| 2021-10-10T12:19:00.985034
| 2014-09-24T03:42:20
| 2014-09-24T03:42:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,072
|
py
|
# -*- coding:utf-8 -*-
import unittest
import mock
from ..decorators import login_required_for, EMPLOYER, EMPLOYEE
class LoginRequiredForTestCase(unittest.TestCase):
    """Tests for the ``login_required_for`` decorator factory.

    Every test patches ``libs.decorators.user_passes_test``, calls
    ``login_required_for`` with a login type (or a tuple of types), then
    extracts the ``check`` predicate that was passed to
    ``user_passes_test`` from ``call_args`` and asserts what it returns
    for an employer/employee user.  Each test also verifies that the
    decorator factory returns ``user_passes_test``'s return value.
    """

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employer_return_user_passes_test_with_check_returning_true(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = True
        # action
        returned_value = login_required_for(EMPLOYER)
        # assert
        # call_args[0][0] is the first positional argument handed to
        # user_passes_test: the check predicate under test.
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertTrue(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employee_return_user_passes_test_with_check_returning_true(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for(EMPLOYEE)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertTrue(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employer_return_user_passes_test_with_check_returning_false(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for(EMPLOYER)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_employee_return_user_passes_test_with_check_returning_false(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = True
        # action
        returned_value = login_required_for(EMPLOYEE)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_should_return_false_if_user_doesnt_have_userregistration_attr(
            self, user_passes_test):
        # setup
        # A bare object() has no .userregistration attribute at all.
        user = object()
        # action
        returned_value = login_required_for(EMPLOYER)
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_should_return_user_passes_test_with_check_returning_false_for_login_types_list(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for((EMPLOYER, EMPLOYER,))
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertFalse(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))

    @mock.patch('libs.decorators.user_passes_test')
    def test_login_required_for_should_return_user_passes_test_with_check_returning_true_for_login_types_list(
            self, user_passes_test):
        # setup
        user = mock.Mock()
        user.userregistration.is_employer = False
        # action
        returned_value = login_required_for((EMPLOYER, EMPLOYEE,))
        # assert
        check = user_passes_test.call_args[0][0]
        passes = check(user)
        self.assertTrue(passes)
        self.assertEqual(id(user_passes_test.return_value), id(returned_value))
|
[
"jchauvel@gmail.com"
] |
jchauvel@gmail.com
|
ad2fd5634726e6272446d55a182d613f898857a5
|
2d0a0a1b6dad8657eaf844edbffe198bb1ff5c3e
|
/uvoyeur/daemon/mcast_proto.py
|
2ecd3e5698e24d8324e5f20bda4e388e3bc41267
|
[
"Apache-2.0"
] |
permissive
|
egustafson/uvoyeur
|
93ed7a4d795c74de477da39162285fdc7959f873
|
ed7a9c60933a898964a111c0e5311bab3172b21a
|
refs/heads/master
| 2021-01-19T22:33:43.403293
| 2015-08-29T19:06:56
| 2015-08-29T19:06:56
| 33,937,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,378
|
py
|
"""PROTOTYPE - Multicast Listener
REPLACE with full Mcast Listener in time
"""
import socket
import struct
import threading
MCAST_GRP = '224.1.1.1'
MCAST_PORT = 5007


class McastListener(threading.Thread):
    """Prototype multicast listener.

    Runs as a daemon thread: joins MCAST_GRP:MCAST_PORT, prints every
    datagram received, and exits after stop() sets the shutdown flag
    (polled once per second via the socket timeout).
    """

    def __init__(self, bus):
        super(McastListener, self).__init__()
        self.setDaemon(True)
        self.bus = bus
        self.shutdown = False

    def subscribe(self):
        # Wire this listener's lifecycle up to the message bus.
        self.bus.subscribe('start', self.do_start)
        self.bus.subscribe('stop', self.stop)

    def do_start(self):
        print("McastListener - start")
        self.start()

    def stop(self):
        self.shutdown = True
        print("McastListener - shutdown")

    def run(self):
        listener = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        listener.bind(('', MCAST_PORT))
        membership = struct.pack("4sl", socket.inet_aton(MCAST_GRP), socket.INADDR_ANY)
        listener.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
        # Short timeout so the loop can notice the shutdown flag promptly.
        listener.settimeout(1)
        while not self.shutdown:
            try:
                msg = listener.recv(10240)
                print("received: {0}".format(msg))
            except socket.timeout:
                #print("sock-tick")
                pass
        print("shutting down.")
## Local Variables:
## mode: python
## End:
|
[
"eg-git@elfwerks.org"
] |
eg-git@elfwerks.org
|
170439db6c8bc3f75bb70bbcfd21eb6a6a194663
|
6d967da5fd95aa5e66ddbb211da40041006ca5ec
|
/myvenv/Lib/site-packages/pip/_vendor/pep517/build.py
|
1c6ecbe79e7f8c4778ecb5872ddf1b4a9c1f59cd
|
[] |
no_license
|
gevorkyannaira/my-first-blog
|
96e4458045a1dd0aa9c1f3ec69f4c829428200e0
|
42ab12a8c2b0e402b5fa1b8e5a7cdd2629d06c16
|
refs/heads/master
| 2022-09-03T21:14:18.946448
| 2020-05-18T18:15:39
| 2020-05-18T18:15:39
| 264,909,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,645
|
py
|
"""Build a project using PEP 517 hooks.
"""
import argparse
import logging
import os
<<<<<<< HEAD
import contextlib
from pip._vendor import pytoml
import shutil
import errno
import tempfile
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
=======
from pip._vendor import toml
import shutil
from .envbuild import BuildEnvironment
from .wrappers import Pep517HookCaller
from .dirtools import tempdir, mkdir_p
from .compat import FileNotFoundError
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
log = logging.getLogger(__name__)
<<<<<<< HEAD
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
=======
def validate_system(system):
"""
Ensure build system has the requisite fields.
"""
required = {'requires', 'build-backend'}
if not (required <= set(system)):
message = "Missing required fields: {missing}".format(
missing=required-set(system),
)
raise ValueError(message)
def load_system(source_dir):
"""
Load the build system from a source dir (pyproject.toml).
"""
pyproject = os.path.join(source_dir, 'pyproject.toml')
with open(pyproject) as f:
pyproject_data = toml.load(f)
return pyproject_data['build-system']
def compat_system(source_dir):
"""
Given a source dir, attempt to get a build system backend
and requirements from pyproject.toml. Fallback to
setuptools but only if the file was not found or a build
system was not indicated.
"""
try:
system = load_system(source_dir)
except (FileNotFoundError, KeyError):
system = {}
system.setdefault(
'build-backend',
'setuptools.build_meta:__legacy__',
)
system.setdefault('requires', ['setuptools', 'wheel'])
return system
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
def _do_build(hooks, env, dist, dest):
get_requires_name = 'get_requires_for_build_{dist}'.format(**locals())
get_requires = getattr(hooks, get_requires_name)
reqs = get_requires({})
log.info('Got build requires: %s', reqs)
env.pip_install(reqs)
log.info('Installed dynamic build dependencies')
with tempdir() as td:
log.info('Trying to build %s in %s', dist, td)
build_name = 'build_{dist}'.format(**locals())
build = getattr(hooks, build_name)
filename = build(td, {})
source = os.path.join(td, filename)
shutil.move(source, os.path.join(dest, os.path.basename(filename)))
<<<<<<< HEAD
def mkdir_p(*args, **kwargs):
"""Like `mkdir`, but does not raise an exception if the
directory already exists.
"""
try:
return os.mkdir(*args, **kwargs)
except OSError as exc:
if exc.errno != errno.EEXIST:
raise
def build(source_dir, dist, dest=None):
pyproject = os.path.join(source_dir, 'pyproject.toml')
dest = os.path.join(source_dir, dest or 'dist')
mkdir_p(dest)
with open(pyproject) as f:
pyproject_data = pytoml.load(f)
# Ensure the mandatory data can be loaded
buildsys = pyproject_data['build-system']
requires = buildsys['requires']
backend = buildsys['build-backend']
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
=======
def build(source_dir, dist, dest=None, system=None):
system = system or load_system(source_dir)
dest = os.path.join(source_dir, dest or 'dist')
mkdir_p(dest)
validate_system(system)
hooks = Pep517HookCaller(
source_dir, system['build-backend'], system.get('backend-path')
)
with BuildEnvironment() as env:
env.pip_install(system['requires'])
>>>>>>> e585743114c1741ec20dc76010f96171f3516589
_do_build(hooks, env, dist, dest)
parser = argparse.ArgumentParser()
parser.add_argument(
'source_dir',
help="A directory containing pyproject.toml",
)
parser.add_argument(
'--binary', '-b',
action='store_true',
default=False,
)
parser.add_argument(
'--source', '-s',
action='store_true',
default=False,
)
parser.add_argument(
'--out-dir', '-o',
help="Destination in which to save the builds relative to source dir",
)
def main(args):
# determine which dists to build
dists = list(filter(None, (
'sdist' if args.source or not args.binary else None,
'wheel' if args.binary or not args.source else None,
)))
for dist in dists:
build(args.source_dir, dist, args.out_dir)
if __name__ == '__main__':
main(parser.parse_args())
|
[
"gevorkyannaira5@gmail.com"
] |
gevorkyannaira5@gmail.com
|
e1f4ff4fde1cb02d80fa4d2b94bbc9519caf75d7
|
aa9f8d7b48dbe3cbecca5eaa2ad3bbea262dbf24
|
/qualite/commentaire.py
|
e75f0efb9eee8d63fab5ff076e2945b6b187921a
|
[] |
no_license
|
ssinayoko/Pyhton_Cours
|
5381a98c42cba021f34b482776933accd3442a6c
|
56b391aeb673b40b564c59053295ac68e2576a1c
|
refs/heads/master
| 2020-08-30T13:32:59.662715
| 2019-10-25T12:50:25
| 2019-10-25T12:50:25
| 218,395,682
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 345
|
py
|
# -*- coding: utf-8 -*-
def addition(a: int, b: int) -> int:
    """
    Add the two operands and return the result.

    :param a: Operand 1
    :param b: Operand 2
    :type a: int
    :type b: int

    :Example:

    >>> addition(2, 2)
    4
    """
    return a + b
# Show the documentation generated from the docstring above.
help(addition)
# Type annotations are not enforced at runtime: passing strings simply
# concatenates them (prints "abcd").
print(addition("abc", "d"))
|
[
"mickael.bolnet@gmail.com"
] |
mickael.bolnet@gmail.com
|
0713d0f46f779f0a3d49497f7ce75e67e8204e77
|
67553d46a257631810f394908013b82c337e0fbd
|
/goat/temp/test.py
|
44fa0a89196104b700f5eda1a88be7e07ae37d41
|
[] |
no_license
|
bopopescu/goat-python
|
3f9d79eb1a9c2733345d699c98d82f91968ca5fa
|
c139488e2b5286033954df50ae1ca834144446f5
|
refs/heads/master
| 2022-11-21T11:25:27.921210
| 2020-03-06T01:02:57
| 2020-03-06T01:02:57
| 281,066,748
| 0
| 0
| null | 2020-07-20T09:00:08
| 2020-07-20T09:00:08
| null |
UTF-8
|
Python
| false
| false
| 1,067
|
py
|
import requests
from lxml import etree
#获取用来打开url的session
sessions = requests.session()
'''
给sssion设置代理,
因为一般的网站没有这个的话,
会拒绝我们的爬虫访问,
因此我们在这模拟谷歌浏览器访问
'''
sessions.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/27.0.1453.94 Safari/537.36'
#进行访问获取源码
r = sessions.get(
'https://baike.baidu.com/item/%E4%B8%AD%E5%9B%BD%E5%9C%B0%E9%9C%87%E5%B1%80%E9%83%91%E5%B7%9E%E5%9F%BA%E7%A1%80%E5%B7%A5%E7%A8%8B%E5%8B%98%E5%AF%9F%E7%A0%94%E7%A9%B6%E9%99%A2%E6%A1%A9%E5%9F%BA%E6%A3%80%E6%B5%8B%E4%B8%AD%E5%BF%83')
#给怕取下来的数据指定解码格式
r.encoding = 'utf-8'
text = r.text
#将网页源代码进行树结构化,以便于使用xpath
content = etree.HTML(text)
#使用xpath提取标签h1中的内容
h = content.xpath('//h1')
h1 = h[0].xpath('string(.)').strip()
print(h1)
d = content.xpath("//div[@label-module='lemmaSummary']")
d1 = d[0].xpath('string(.)').strip()
print(d1)
|
[
"34465021+jwfl724168@users.noreply.github.com"
] |
34465021+jwfl724168@users.noreply.github.com
|
23d890921b1774bbc78f6653f655c81c69604fe4
|
20d8a89124008c96fa59225926ce39f113522daa
|
/UL_NanoAODv8/2017/step1_cfg.py
|
122f3ba6db11246c43b88205c06a08dbd288cabb
|
[] |
no_license
|
MiT-HEP/MCProduction
|
113a132a2ff440e13225be518ff8d52b0136e1eb
|
df019d7a15717a9eafd9502f2a310023dcd584f5
|
refs/heads/master
| 2022-05-06T20:25:34.372363
| 2022-04-12T11:55:15
| 2022-04-12T11:55:15
| 37,586,559
| 5
| 7
| null | 2015-08-24T11:13:58
| 2015-06-17T09:45:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,862
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/PPD-RunIISummer20UL17wmLHEGEN-00001-fragment.py --python_filename step1_cfg.py --eventcontent RAWSIM,LHE --customise Configuration/DataProcessing/Utils.addMonitoring --datatier GEN,LHE --fileout file:step1.root --conditions 106X_mc2017_realistic_v6 --beamspot Realistic25ns13TeVEarly2017Collision --step LHE,GEN --geometry DB:Extended --era Run2_2017 --no_exec --mc -n 500
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
# Command-line options: jobNum drives the luminosity-block offset below;
# "chain" names the generator fragment module to load later.
options = VarParsing ('analysis')
options.register('jobNum', 0, VarParsing.multiplicity.singleton,VarParsing.varType.int,"jobNum")
options.register('chain', 'hbbg', VarParsing.multiplicity.singleton,VarParsing.varType.string,'chain')
options.parseArguments()
from Configuration.Eras.Era_Run2_2017_cff import Run2_2017

process = cms.Process('GEN',Run2_2017)

# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeVEarly2017Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(500)
)

# Input source
# Each job owns a unique range of 10 lumi blocks (100 events each, 500
# events per job), so events are globally distinguishable across jobs.
firstLumi=10*options.jobNum+1 ## eventsPerJob/eventsPerLumi*jobNum +1
process.source = cms.Source("EmptySource",
                            firstLuminosityBlock = cms.untracked.uint32(firstLumi),
                            numberEventsInLuminosityBlock = cms.untracked.uint32(100)
)

process.options = cms.untracked.PSet(

)

# Production Info
process.configurationMetadata = cms.untracked.PSet(
    annotation = cms.untracked.string('Configuration/GenProduction/python/PPD-RunIISummer20UL17wmLHEGEN-00001-fragment.py nevts:500'),
    name = cms.untracked.string('Applications'),
    version = cms.untracked.string('$Revision: 1.19 $')
)

# Output definition
# GEN-tier output, kept only for events that pass the generation step.
process.RAWSIMoutput = cms.OutputModule("PoolOutputModule",
    SelectEvents = cms.untracked.PSet(
        SelectEvents = cms.vstring('generation_step')
    ),
    compressionAlgorithm = cms.untracked.string('LZMA'),
    compressionLevel = cms.untracked.int32(1),
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('GEN'),
        filterName = cms.untracked.string('')
    ),
    eventAutoFlushCompressedSize = cms.untracked.int32(20971520),
    fileName = cms.untracked.string('file:step1.root'),
    outputCommands = process.RAWSIMEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)

# LHE-tier output (no event selection: keeps every generated LHE event).
process.LHEoutput = cms.OutputModule("PoolOutputModule",
    dataset = cms.untracked.PSet(
        dataTier = cms.untracked.string('LHE'),
        filterName = cms.untracked.string('')
    ),
    fileName = cms.untracked.string('file:step1_inLHE.root'),
    outputCommands = process.LHEEventContent.outputCommands,
    splitLevel = cms.untracked.int32(0)
)

# Additional output definition

# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '106X_mc2017_realistic_v6', '')
print ("Loading chain",options.chain)
process.load("fragment_"+ options.chain)
#process.externalLHEProducer.args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/2017/13TeV/powheg/V2/gg_H_quark-mass-effects_NNPDF31_13TeV_M125/v1/gg_H_quark-mass-effects_NNPDF31_13TeV_M125_slc6_amd64_gcc630_CMSSW_9_3_0.tgz'),
# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.RAWSIMoutput_step = cms.EndPath(process.RAWSIMoutput)
process.LHEoutput_step = cms.EndPath(process.LHEoutput)
# Schedule definition
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.endjob_step,process.RAWSIMoutput_step,process.LHEoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# filter all path with the production filter sequence
for path in process.paths:
if path in ['lhe_step']: continue
getattr(process,path).insert(0, process.generator)
# customisation of the process.
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
import os
import random

# BUGFIX: the original line was ``random.seed = os.urandom(10)  #~10^14``,
# which *rebinds* random.seed to a bytes object instead of calling it, so
# the module RNG was never actually reseeded from OS entropy.  Call it
# properly so each job gets independent initialSeed values.
random.seed(os.urandom(10))
process.RandomNumberGeneratorService.externalLHEProducer.initialSeed = random.randint(0,999999)
process.RandomNumberGeneratorService.generator.initialSeed = random.randint(0,999999)
# Add early deletion of temporary data products to reduce peak memory need
# (standard CMSSW memory-saving customisation; applied last).
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
|
[
"andrea.marini@cern.ch"
] |
andrea.marini@cern.ch
|
4383b6408419e66072835b37d23d97ba2d7e7aae
|
84a19fe0b89bb19caa1641aeadc9623c1a181767
|
/abc/117/d.py
|
a815ba914748d87fb00aa584570fd4079a6fb59d
|
[
"MIT"
] |
permissive
|
wotsushi/competitive-programming
|
75abae653cff744189c53ad7e6dbd2ca1a62e3a8
|
17ec8fd5e1c23aee626aee70b1c0da8d7f8b8c86
|
refs/heads/master
| 2021-06-10T06:42:40.846666
| 2021-05-31T10:32:51
| 2021-05-31T10:32:51
| 175,002,279
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 645
|
py
|
# Input
N, K = map(int, input().split())
A = list(map(int, input().split()))

# Decide each bit of X from the most significant bit downwards.
# Once a bit is fixed to 0 (i.e. X drops strictly below the bound), the
# best setting of the lower bits can be chosen greedily.


def s(i):
    # Number of elements of A whose bit (i - 1) (0-indexed) is set.
    return sum((a >> (i - 1)) & 1 for a in A)


def g(L, b):
    # Best achievable total over the low b bits, for 0 <= X <= L.
    # Choosing bit value 0 contributes m * s(b) (elements with the bit set
    # contribute); choosing 1 contributes m * (N - s(b)).
    # Cases: L below the top bit forces 0; L all-ones leaves both choices
    # free at every level; otherwise branch on the top bit of X.
    m = 2**(b - 1)
    return (
        0 if b == 0 else
        g(L, b - 1) + m * s(b) if L < m else
        g(L >> 1, b - 1) + m * max(
            s(b),
            N - s(b)
        ) if L == (2**b - 1) else
        max(
            g(m - 1, b - 1) + m * s(b),
            g(L - m, b - 1) + m * (N - s(b))
        )
    )


ans = g(K, max(K, *A).bit_length())

# Output
print(ans)
|
[
"wotsushi@gmail.com"
] |
wotsushi@gmail.com
|
204e9db02dcad09209555ab4e5630f11266c831d
|
a7b66311c2ce113789933ec3162f1128b2862f13
|
/app/waterQual/model/basinRef_box.py
|
fda72761a329cd4eb4e8dc8d10146234f9f760a6
|
[
"MIT"
] |
permissive
|
ChanJeunlam/geolearn
|
214b2c42359ea1164b39117fad2d7470adeb6d35
|
791caa54eb70920823ea7d46714dc8a3e7fa7445
|
refs/heads/master
| 2023-07-16T04:13:15.526364
| 2021-08-16T05:24:18
| 2021-08-16T05:24:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,108
|
py
|
from hydroDL.master import basins
from hydroDL.app import waterQuality
from hydroDL import kPath
from hydroDL.model import trainTS
from hydroDL.data import gageII, usgs
from hydroDL.post import axplot, figplot
import torch
import os
import json
import numpy as np
import matplotlib.pyplot as plt
# Load the basinRef data model and evaluate two trained models ("opt1",
# "opt2") on the train/test splits, collecting per-site error matrices.
wqData = waterQuality.DataModelWQ('basinRef')
outLst = ['basinRef-first50-opt1', 'basinRef-first50-opt2']
trainSet = 'first50'
testSet = 'last50'
pLst1, pLst2, errMatLst1, errMatLst2 = [list() for x in range(4)]
for outName in outLst:
    master = basins.loadMaster(outName)
    yP1, ycP1 = basins.testModel(outName, trainSet, wqData=wqData)
    yP2, ycP2 = basins.testModel(outName, testSet, wqData=wqData)
    errMatC1 = wqData.errBySiteC(ycP1, subset=trainSet, varC=master['varYC'])
    errMatC2 = wqData.errBySiteC(ycP2, subset=testSet, varC=master['varYC'])
    pLst1.append(ycP1)
    pLst2.append(ycP2)
    errMatLst1.append(errMatC1)
    errMatLst2.append(errMatC2)

# figure out number of sample
# countMat[i, iC] = number of non-NaN observations of variable iC at site i.
info = wqData.info
siteNoLst = info['siteNo'].unique().tolist()
ycT = wqData.c
nc = ycT.shape[1]
countMat = np.full([len(siteNoLst), nc], 0)
for i, siteNo in enumerate(siteNoLst):
    indS = info[info['siteNo'] == siteNo].index.values
    for iC in range(nc):
        countMat[i, iC] = np.count_nonzero(~np.isnan(ycT[indS, iC]))

# plot box
# One boxplot figure per USGS variable group; four boxes per variable
# (train/test x opt1/opt2).
codePdf = usgs.codePdf
groupLst = codePdf.group.unique().tolist()
for group in groupLst:
    codeLst = codePdf[codePdf.group == group].index.tolist()
    indLst = [wqData.varC.index(code) for code in codeLst]
    labLst1 = [codePdf.loc[code]['shortName'] +
               '\n'+code for code in codeLst]
    # NOTE(review): labels repeat 'train opt2'/'test opt2'; presumably this
    # should read ['train opt1', 'train opt2', 'test opt1', 'test opt2'].
    labLst2 = ['train opt1', 'train opt2', 'test opt2', 'test opt2']
    dataBox = list()
    for ic in indLst:
        temp = list()
        for errMat in errMatLst1+errMatLst2:
            # NOTE(review): ``ind`` (sites with >50 samples) is computed but
            # never used -- was the intent ``errMat[ind, ic, 1]``?  Confirm.
            ind = np.where(countMat[:, ic] > 50)[0]
            temp.append(errMat[:, ic, 1])
        dataBox.append(temp)
    title = 'correlation of {} group'.format(group)
    fig = figplot.boxPlot(dataBox, label1=labLst1, label2=labLst2)
    fig.suptitle(title)
    fig.show()
|
[
"geofkwai@gmail.com"
] |
geofkwai@gmail.com
|
778767f7a0d4a6de15f577542a87425d58a718a4
|
f9e6fd01ba0c8b5eff3680fd4c38237a540be1d0
|
/config.py
|
f1f1dd0925475e74bad86fa3cfadbc91d9d865f9
|
[] |
no_license
|
kupacariibumu/NovelDownloader
|
757e2a339e946e26bdf30debc5a84c0d54672e3d
|
ded5a2a4243b5f171c8d9daa448931321de2ea81
|
refs/heads/master
| 2023-06-02T05:16:39.140316
| 2019-12-16T15:45:09
| 2019-12-16T15:45:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,717
|
py
|
from typing import Any, Dict, Union
import yaml
import utils
import websites
class Config:
    """A book's configuration loaded from YAML.

    ``website`` is resolved eagerly via ``websites.from_config``; any other
    config key is exposed as an attribute (missing keys yield None).
    """

    def __init__(self, book: str, values: Dict[str, str]):
        self.book = book
        self.website = websites.from_config(values["website"])
        self.values = values

    def __getattr__(self, name):
        # Only invoked for attributes not found normally; fall back to the
        # raw config mapping (None when the key is absent).
        return self.values.get(name)
def _get_website(config: dict):
    """Interactively choose a website backend and record it in *config*.

    Option 0 is a fully custom scrape definition entered field-by-field;
    options 1..N come from ``websites.WEBSITES``.

    BUGFIX: the original read the loop variable ``i`` *after* the ``for``
    loop to bound ``input_int``, which raises NameError when
    ``websites.WEBSITES`` is empty; the bound is now computed explicitly.
    """
    print("[0] Custom")
    for i, website in enumerate(websites.WEBSITES, 1):
        print("[{}] {}".format(i, website.name))
    website_index = utils.input_int("Website: ", 0, len(websites.WEBSITES))
    if website_index > 0:
        website = websites.WEBSITES[website_index - 1]
        config["website"] = website.name
        config.update(website.create_config())
    else:
        config["website"] = {
            "toc_url": input("TOC url: "),
            "toc_start": input("TOC start: "),
            "toc_end": input("TOC end: "),
            "toc_link": input("TOC link regex (optional): ") or 'href="(.*?)"',
            "chapter_url": input("Chapter url: "),
            "chapter_start": input("Chapter start: "),
            "chapter_end": input("Chapter end: "),
        }
def create_config(book: str):
    """Interactively build a new config for *book* and write it as YAML."""
    print("Creating new config for {}:".format(book))
    config: Dict[str, Any] = {}
    _get_website(config)
    # The display name is optional; omit the key entirely when blank.
    name = input("Name? (optional) ")
    if name:
        config["name"] = name
    with open(utils.get_config_file(book), "w") as f:
        yaml.dump(config, f, default_flow_style=False)
    print("Config created at:", utils.get_config_file(book))
    print()
def load_config(book: str) -> Config:
    """Read the YAML config file for *book* and wrap it in a Config."""
    config_path = utils.get_config_file(book)
    with open(config_path) as f:
        return Config(book, yaml.safe_load(f))
|
[
"1benediktwerner@gmail.com"
] |
1benediktwerner@gmail.com
|
795ad238a3cee773a6c8a30b3dcfe36fc367688e
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02696/s652919986.py
|
d04309a3129fc7b8360255b5cdba0b3e126b0677
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 131
|
py
|
a, b, n = map(int, input().split())
# f(x) = floor(a*x/b) - a*floor(x/b) is maximised at x = min(n, b - 1):
# past x = b - 1 the two terms cancel each extra multiple of b.
x = b - 1 if n >= b - 1 else n
print(a * x // b - a * (x // b))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
c2be790281c74b3a097b6e5fcd55262d0ffe0919
|
b3b68efa404a7034f0d5a1c10b281ef721f8321a
|
/Scripts/simulation/interactions/picker/situation_picker_interaction.py
|
5a6cfed4b77d651b4dc7af8cf2245a263c9e698e
|
[
"Apache-2.0"
] |
permissive
|
velocist/TS4CheatsInfo
|
62195f3333076c148b2a59f926c9fb5202f1c6fb
|
b59ea7e5f4bd01d3b3bd7603843d525a9c179867
|
refs/heads/main
| 2023-03-08T01:57:39.879485
| 2021-02-13T21:27:38
| 2021-02-13T21:27:38
| 337,543,310
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,819
|
py
|
# uncompyle6 version 3.7.4
# Python bytecode 3.7 (3394)
# Decompiled from: Python 3.7.9 (tags/v3.7.9:13c94747c7, Aug 17 2020, 18:58:18) [MSC v.1900 64 bit (AMD64)]
# Embedded file name: T:\InGame\Gameplay\Scripts\Server\interactions\picker\situation_picker_interaction.py
# Compiled at: 2017-08-29 22:16:16
# Size of source mod 2**32: 4323 bytes
from event_testing.resolver import InteractionResolver
from filters.tunable import FilterResult
from interactions.base.picker_interaction import SimPickerInteraction, AutonomousSimPickerSuperInteraction
from interactions.base.picker_strategy import SimPickerEnumerationStrategy
from sims4.tuning.tunable import TunableList, TunableVariant, TunablePackSafeReference
from sims4.tuning.tunable_base import GroupNames
from sims4.utils import flexmethod
from situations.situation_by_definition_or_tags import SituationSearchByDefinitionOrTagsVariant
from vet.vet_picker_strategy import VetCustomerPickerEnumerationStrategy
import services, sims4
class SituationSimsPickerMixin:
INSTANCE_TUNABLES = {'valid_situations':SituationSearchByDefinitionOrTagsVariant(description='\n Situations where the guest list will be collected to populate the picker.\n ',
tuning_group=GroupNames.PICKERTUNING),
'job_filter':TunableList(description='\n If provided, only looks for Sims with the specified jobs.\n ',
tunable=TunablePackSafeReference(manager=(services.get_instance_manager(sims4.resources.Types.SITUATION_JOB))),
tuning_group=GroupNames.PICKERTUNING)}
REMOVE_INSTANCE_TUNABLES = ('sim_filter', 'sim_filter_household_override', 'sim_filter_requesting_sim',
'include_uninstantiated_sims', 'include_instantiated_sims',
'include_actor_sim', 'include_target_sim')
@flexmethod
def _get_valid_sim_choices_gen(cls, inst, target, context, **kwargs):
inst_or_cls = inst if inst is not None else cls
for situation in cls.valid_situations.get_all_matching_situations():
for sim in situation.all_sims_in_situation_gen():
if cls.job_filter:
if situation.get_current_job_for_sim(sim) not in cls.job_filter:
continue
if inst_or_cls.sim_tests:
if inst:
interaction_parameters = inst.interaction_parameters.copy()
else:
interaction_parameters = kwargs.copy()
interaction_parameters['picked_item_ids'] = {
sim.sim_id}
resolver = InteractionResolver(cls, inst, target=target, context=context, **interaction_parameters)
if inst_or_cls.sim_tests.run_tests(resolver):
yield FilterResult(sim_info=(sim.sim_info))
else:
yield FilterResult(sim_info=(sim.sim_info))
class SituationSimsPickerInteraction(SituationSimsPickerMixin, SimPickerInteraction):
    # Player-directed variant: situation-based Sim filtering (from the mixin)
    # on top of the standard Sim picker dialog.  All behavior comes from the
    # two bases; no overrides are needed.
    pass
class AutonomousSituationSimsPickerInteraction(SituationSimsPickerMixin, AutonomousSimPickerSuperInteraction):
    # Autonomous variant: the Sim chooses a target itself through a tunable
    # enumeration strategy instead of presenting a picker dialog to the player.
    INSTANCE_TUNABLES = {'choice_strategy': TunableVariant(description='\n Strategy to use for picking a Sim.\n ',
      default='default_sim_picker',
      default_sim_picker=(SimPickerEnumerationStrategy.TunableFactory()),
      vet_customer_picker=(VetCustomerPickerEnumerationStrategy.TunableFactory()),
      tuning_group=(GroupNames.PICKERTUNING))}
    REMOVE_INSTANCE_TUNABLES = ('test_compatibility', )

    def __init__(self, *args, **kwargs):
        # Fix a decompiler artifact: the original ``(super().__init__)(args, ...)``
        # passed the whole ``args`` tuple as ONE positional argument.  Forward the
        # captured positionals individually with ``*args``.
        super().__init__(*args, choice_enumeration_strategy=self.choice_strategy, **kwargs)
|
[
"cristina.caballero2406@gmail.com"
] |
cristina.caballero2406@gmail.com
|
3903880ef11dddcfb52a460e340e38f17acd4533
|
2aff23f7efc101969df2d13c5de91208f1153ff7
|
/pyexcel_matplotlib/__init__.py
|
63181b955335972638426181671ca5d3dffa487d
|
[
"BSD-3-Clause"
] |
permissive
|
mobanbot/pyexcel-matplotlib
|
7a8c12cb897173647377b2656cbac246f58793fe
|
8771fcf3cc82164b50dc7ec0314838bf3de63e3b
|
refs/heads/master
| 2021-06-19T16:17:29.541971
| 2017-07-13T07:17:31
| 2017-07-13T07:18:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 385
|
py
|
"""
pyexcel_matplotlib
~~~~~~~~~~~~~~~~~~~
chart drawing plugin for pyexcel
:copyright: (c) 2016-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for further details
"""
from pyexcel.plugins import PyexcelPluginChain
PyexcelPluginChain(__name__).add_a_renderer(
relative_plugin_class_path='plot.MatPlotter',
file_types=['svg', 'png']
)
|
[
"wangc_2011@hotmail.com"
] |
wangc_2011@hotmail.com
|
e01067024fe471232edab834f0a4d5da7c238f63
|
8906e04870524f190a11f3eb3caf8fe377ab3a24
|
/Chapter13/Chapter_13/obs_tower2/labeler/main.py
|
3e5a85559d1fdfeeedd004fdb99b45064e24782f
|
[
"MIT"
] |
permissive
|
PacktPublishing/Hands-On-Reinforcement-Learning-for-Games
|
8719c086c8410a2da2b4fb9852b029a4c8f67f60
|
609d63ee5389b80b760a17f7f43abe632d99a9bb
|
refs/heads/master
| 2023-02-08T19:35:30.005167
| 2023-01-30T09:09:07
| 2023-01-30T09:09:07
| 231,567,217
| 54
| 32
|
MIT
| 2022-04-21T06:47:24
| 2020-01-03T10:43:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,940
|
py
|
"""
Web server for the data labeling tool.
This web server looks for a trained classifier in the
scripts/ directory.
If such a classifier is found, its outputs are shown as
part of the web interface.
"""
import io
import json
import os
import random
from PIL import Image
from flask import Flask, send_file, send_from_directory
import numpy as np
import torch
from obs_tower2.labels import LabeledImage, load_all_labeled_images
from obs_tower2.model import StateClassifier
from obs_tower2.recording import load_all_data, sample_recordings
app = Flask(__name__, static_url_path='')
# Global state shared by all request handlers (single-process dev server).
labelled = load_all_labeled_images()
recordings = load_all_data()
CLASSIFIER_PATH = '../scripts/save_classifier.pkl'
# Optionally load a pre-trained classifier; when present its predictions are
# shown in the UI (see handle_classify, which returns 'null' otherwise).
if os.path.exists(CLASSIFIER_PATH):
    classifier = StateClassifier()
    classifier.load_state_dict(torch.load(CLASSIFIER_PATH, map_location='cpu'))
else:
    classifier = None
@app.route('/assets/<path:path>')
def handle_asset(path):
    """Serve static files from the local assets/ directory."""
    return send_from_directory('assets', path)
@app.route('/')
def handle_root():
    """Serve the labeling UI entry page."""
    return send_from_directory('.', 'index.html')
@app.route('/sample')
def handle_sample():
    """Return the name of a random frame that has not been labelled yet."""
    return sample_new_name()
@app.route('/frame/<name>')
def handle_frame(name):
    """Render the frame identified by *name* as an in-memory PNG response."""
    image = load_frame(name)
    png_bytes = io.BytesIO()
    image.save(png_bytes, 'PNG')
    png_bytes.seek(0)
    return send_file(png_bytes, mimetype='image/png')
@app.route('/key/<name>')
def handle_key(name):
    """Return JSON true/false: does a key appear shortly after this frame?"""
    return json.dumps(check_key(name))
@app.route('/classify/<name>')
def handle_classify(name):
    """Return the classifier's per-label sigmoid outputs for the frame.

    Returns the string 'null' when no trained classifier was loaded.
    """
    if classifier is None:
        return 'null'
    img = np.array(load_frame(name))
    # img[None] adds a batch dimension; assumes StateClassifier accepts the
    # raw HWC uint8 frame -- TODO confirm against StateClassifier.forward.
    inputs = torch.from_numpy(img[None])
    outputs = torch.sigmoid(classifier(inputs)).detach().numpy()[0]
    return json.dumps([float(x) for x in outputs])
@app.route('/save/<name>/<labels>')
def handle_save(name, labels):
    """Persist a labelled frame; *labels* is a comma-separated list of 0/1 flags."""
    frame = load_frame(name)
    labels = [x == '1' for x in labels.split(',')]
    img = LabeledImage(os.environ['OBS_TOWER_IMAGE_LABELS'], name, *labels)
    img.save(frame)
    # Keep the in-memory index in sync so this frame is not sampled again.
    labelled.append(img)
    return 'success'
def sample_new_name():
    """Sample a random (seed, uid, frame) triple that has not been labelled.

    Loops until a frame name not present in ``labelled`` is found and returns
    it in the '<seed>_<uid>_<frame>' format used by the other endpoints.
    """
    while True:
        rec = sample_recordings(recordings, 1)[0]
        frame = random.randrange(rec.num_steps)
        name = '%d_%d_%d' % (rec.seed, rec.uid, frame)
        # Generator predicate: short-circuits on the first match instead of
        # materializing a full intermediate list as the original did.
        if any(x.name == name for x in labelled):
            continue
        return name
def load_frame(name):
    """Load the frame identified by '<seed>_<uid>_<frame>' as a PIL image."""
    rec, frame = find_rec_frame(name)
    return Image.fromarray(rec.load_frame(frame))
def check_key(name):
    """Heuristically detect whether the agent holds a key soon after this frame.

    Scans every 5th frame in the window [frame+10, frame+50) and returns True
    if any of them has non-zero pixels in image row 2 (presumably where the
    key indicator is drawn in the HUD -- TODO confirm).
    """
    rec, frame = find_rec_frame(name)
    for i in range(frame + 10, min(frame + 50, rec.num_steps), 5):
        img = rec.load_frame(i)
        if not (img[2] == 0).all():
            return True
    return False
def find_rec_frame(name):
    """Resolve a '<seed>_<uid>_<frame>' name to a (recording, frame index) pair."""
    parts = name.split('_')
    seed, uid, frame = int(parts[0]), int(parts[1]), int(parts[2])
    matching = (r for r in recordings if r.seed == seed and r.uid == uid)
    return next(matching), frame
if __name__ == '__main__':
app.run()
|
[
"josephs@packt.com"
] |
josephs@packt.com
|
9e96120750f7833d375a3b5ddf802df2de37e27f
|
9cf179388a901089cd547d36eedf0fd7a42eb9bd
|
/config/settings/base.py
|
9dbbf59ed9571d4f50ab378bb3a98edc393489b7
|
[] |
no_license
|
birkoss/stocks
|
94015013bfef9d19beabfea854891eac95fa1f8d
|
58d8f030add64962aea386ef72c50665381c6258
|
refs/heads/master
| 2023-02-16T02:29:55.500638
| 2020-12-22T19:30:01
| 2020-12-22T19:30:01
| 323,704,415
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,861
|
py
|
import json
import os
from django.core.exceptions import ImproperlyConfigured
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
    with open('secrets.json') as f:
        secrets = json.load(f)
except FileNotFoundError:
    # Fail fast at import time with an explicit message rather than a
    # confusing NameError/KeyError later when a secret is first read.
    raise ImproperlyConfigured('Fill the secrets.json file')
def get_secret(setting, secrets=secrets):
    """Return the secret named *setting*, or raise an explicit exception.

    *secrets* defaults to the mapping loaded from secrets.json at import time.

    Raises:
        ImproperlyConfigured: when *setting* is missing from the mapping.
    """
    try:
        return secrets[setting]
    except KeyError:
        # Fixed a mis-encoded arrow character that had crept into the message
        # ('environment → variable' -> 'environment variable').
        error_msg = 'Set the {0} environment variable'.format(setting)
        raise ImproperlyConfigured(error_msg)
SECRET_KEY = get_secret('SECRET_KEY')
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social_django',
'core',
'users',
'stocks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'config.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
],
},
},
]
WSGI_APPLICATION = 'config.wsgi.application'
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', # nopep8
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', # nopep8
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', # nopep8
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', # nopep8
},
]
AUTHENTICATION_BACKENDS = (
'social_core.backends.google.GoogleOAuth2',
'social_core.backends.facebook.FacebookOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_FACEBOOK_SCOPE = ['email']
SOCIAL_AUTH_FACEBOOK_PROFILE_EXTRA_PARAMS = {
'fields': 'id,name,email',
}
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'social_core.pipeline.user.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'social_core.pipeline.user.user_details',
)
LOGIN_URL = 'home'
LOGOUT_URL = 'logout'
LOGIN_REDIRECT_URL = 'home'
SOCIAL_AUTH_LOGIN_REDIRECT_URL = 'home'
SOCIAL_AUTH_FACEBOOK_KEY = get_secret('FACEBOOK_KEY')
SOCIAL_AUTH_FACEBOOK_SECRET = get_secret('FACEBOOK_SECRET')
SOCIAL_AUTH_GOOGLE_OAUTH2_KEY = get_secret('GOOGLE_KEY')
SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET = get_secret('GOOGLE_SECRET')
AUTH_USER_MODEL = 'users.User'
LANGUAGE_CODE = 'en-ca'
TIME_ZONE = 'America/Montreal'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
|
[
"admin@birkoss.com"
] |
admin@birkoss.com
|
fb19cc288cacbf45c79fb602182a6a2014c7a09a
|
4f2cdd9a34fce873ff5995436edf403b38fb2ea5
|
/Data-Structures/List/Part2/P007.py
|
efd86d663d29f875ee66ef39f98c36aa37486be6
|
[] |
no_license
|
sanjeevseera/Python-Practice
|
001068e9cd144c52f403a026e26e9942b56848b0
|
5ad502c0117582d5e3abd434a169d23c22ef8419
|
refs/heads/master
| 2021-12-11T17:24:21.136652
| 2021-08-17T10:25:01
| 2021-08-17T10:25:01
| 153,397,297
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 183
|
py
|
"""
Write a Python program to find the index of an item in a specified list.
"""
num =[10, '30', 4, -6]
try:
print(num.index('30'))
except:
print("Value not in List")
|
[
"seerasanjeev@gmail.com"
] |
seerasanjeev@gmail.com
|
61e8f3382590d817b94e5a1f6bb2299f795c7962
|
271886f348c3b72cd4b2a34ca456491d39bde520
|
/component-clustering/model-variance-exploration_2.py
|
143358c7e11e9548b3894d7d19ecc99fa6922c63
|
[
"MIT"
] |
permissive
|
tingard/Galaxy-builder-aggregation
|
4ca99c7473d31d9a0b6909e3ccc9b08559dc04b1
|
78fec76eeb2ab4b38e241b66fa5643e0002ba3a7
|
refs/heads/master
| 2021-06-28T06:46:00.676450
| 2019-07-17T16:45:56
| 2019-07-17T16:45:56
| 126,490,129
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,613
|
py
|
import numpy as np
import pandas as pd
import lib.galaxy_utilities as gu
import gzbuilderaggregation
from progress.bar import Bar
from multiprocessing import Pool
import argparse
import warnings
from astropy.utils.exceptions import AstropyWarning
warnings.simplefilter('ignore', category=AstropyWarning)
N_SPLITS = 5
def get_pa_from_arms(arms):
    """Return the (pitch angle, sigma) of a galaxy's spiral arms.

    Falls back to (nan, nan) when *arms* is empty (``arms[0]`` raises
    IndexError), so callers can tabulate results uniformly.
    """
    try:
        p = arms[0].get_parent()
        return p.get_pitch_angle(arms)
    except IndexError:
        return (np.nan, np.nan)
def get_splits_df(ss_id, val_id, dr8id):
    """Aggregate N_SPLITS bootstrap models for one galaxy into a DataFrame.

    Each split aggregates a random sample of 30 classifications drawn from
    the combined original + validation subject classifications.
    NOTE(review): dr8id is unused inside this function; callers key the
    concatenated results by it externally.
    """
    gal, angle = gu.get_galaxy_and_angle(ss_id)
    cls_for_gal = gu.classifications.query(
        'subject_ids == {} | subject_ids == {}'.format(ss_id, val_id)
    )
    results = []
    for i in range(N_SPLITS):
        cls_sample = cls_for_gal.sample(30)
        results.append(
            gzbuilderaggregation.make_model(
                cls_sample,
                gal, angle,
            )
        )
    # One row per split; splits where a component was not recovered are dropped.
    disk_df = pd.DataFrame([
        i[0]['disk'] for i in results if i[0]['disk'] is not None
    ])
    disk_df.columns = 'disk_' + disk_df.columns
    bulge_df = pd.DataFrame([
        i[0]['bulge'] for i in results if i[0]['bulge'] is not None
    ])
    bulge_df.columns = 'bulge_' + bulge_df.columns
    bar_df = pd.DataFrame([
        i[0]['bar'] for i in results if i[0]['bar'] is not None
    ])
    bar_df.columns = 'bar_' + bar_df.columns
    # Pitch angle comes from the spiral arms (last element of each result).
    pa_df = pd.DataFrame(
        [get_pa_from_arms(i[-1]) for i in results],
        columns=('pa', 'sigma_pa')
    )
    gal_df = pd.concat((disk_df, bulge_df, bar_df, pa_df), axis=1, sort=False)
    return gal_df
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=(
'Perform Shuffle split variance explortation'
' on aggregate models'
)
)
parser.add_argument('--nsplits', '-N', metavar='N', default=5,
help='Number of splits to use')
args = parser.parse_args()
N_SPLITS = int(args.nsplits)
dr8ids, ss_ids, validation_ids = np.load('lib/duplicate_galaxies.npy').T
out = []
to_iter = np.stack((ss_ids, validation_ids, dr8ids), axis=-1)
bar = Bar('Calculating aggregate models', max=len(dr8ids),
suffix='%(percent).1f%% - %(eta)ds')
try:
for row in to_iter:
try:
out.append(get_splits_df(*row))
except Exception as e:
print('\n', row[0], e)
bar.next()
bar.finish()
except KeyboardInterrupt:
pass
df = pd.concat(out, keys=dr8ids, sort=False)
df.to_pickle('model-variances.pkl')
|
[
"tklingard@gmail.com"
] |
tklingard@gmail.com
|
9a2a46b9e35529dc5ec63c6c719c5b2d2bb9dffc
|
c1c3dc2d8a3bbe12eb60f49f277f605793fa7758
|
/lesson06_multi_nasled/death_romb.py
|
4cfa169b0949ef6301c53b7097827fb658c970e2
|
[] |
no_license
|
Bulgakoff/UdemyNew
|
838e073b3ab61ae227fcc497f9ded5d6048f3077
|
97a27d0de164fcbd06def5e0edf1464ad46b5668
|
refs/heads/master
| 2020-09-23T03:29:20.789215
| 2019-12-09T11:04:28
| 2019-12-09T11:04:28
| 225,391,089
| 0
| 0
| null | 2019-12-09T11:04:29
| 2019-12-02T14:15:39
|
Python
|
UTF-8
|
Python
| false
| false
| 994
|
py
|
class Animal:
    # Root of a diamond-inheritance ("deadly diamond") demo showing how
    # cooperative super() walks the MRO exactly once per class.
    def set_health(self, health):
        print('set in Animal')
class Carnivour(Animal):
    def set_health(self, health):
        super().set_health(health)  # cooperative super() so shared bases are not called twice
        print('set in Carnivour')
class Mammal(Animal):
    def set_health(self, health):
        super().set_health(health)  # cooperative super() so shared bases are not called twice
        print('set in Mammal')
class Dog(Carnivour, Mammal):
    def set_health(self, health):
        # One super() call walks the full MRO: Carnivour -> Mammal -> Animal.
        # The commented manual calls below would invoke Animal.set_health twice
        # (the diamond problem this example demonstrates).
        super().set_health(health)
        # Carnivour.set_health(self, health)
        # Mammal.set_health(self, health)
        print('set in Dog')
print('///////////////////////////////////////')
dog = Dog()
print(f'-----собака-------{dog.set_health(10)}--')
|
[
"hlbu@yandex.ru"
] |
hlbu@yandex.ru
|
fe4f9588384f0ada08e023ffb6a95d0de228157c
|
8703982937001523f125cb65a80002e5ebb95477
|
/config.py
|
4a511a489406af5835497a3304fce1fb98252df1
|
[
"MIT"
] |
permissive
|
meddulla/GNN-Tutorial-Recsys2015
|
12638d2510859b08fc3249be638e756704b69cf4
|
82918c5ec824c1580c8c61a2bb76f3cbab08f19c
|
refs/heads/master
| 2020-09-25T18:28:10.408024
| 2019-12-05T09:23:47
| 2019-12-05T09:23:47
| 226,063,496
| 0
| 0
|
MIT
| 2019-12-05T09:22:34
| 2019-12-05T09:22:34
| null |
UTF-8
|
Python
| false
| false
| 740
|
py
|
import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') # sets device for model and PyTorch tensors
embed_dim = 128
batch_size = 1024
num_embeds = 52739
image_folder = 'data'
c_file = 'data/yoochoose-data/yoochoose-clicks.dat'
b_file = 'data/yoochoose-data/yoochoose-buys.dat'
c_index = ["session_id", "timestamp", "item_id", "category"]
b_index = ["session_id", "timestamp", "item_id", "price", "quantity"]
test_data_file = 'data/yoochoose-data/yoochoose-test.dat'
# Training parameters
num_workers = 4 # for data-loading
grad_clip = 5. # clip gradients at an absolute value of
print_freq = 10 # print training/validation stats every __ batches
checkpoint = None # path to checkpoint, None if none
|
[
"foamliu@yeah.net"
] |
foamliu@yeah.net
|
15b5a61186ff47009a360de4e660aa87ece8da91
|
cbbdbdfa3d69a11de5dbd80f860986c97ec10b67
|
/marrow/schema/transform/complex.py
|
391f78fc1519129ad02552bb1b941af914cf6966
|
[
"MIT"
] |
permissive
|
lokeshmeher/schema
|
757cbc837c91f124774d3a1562ceccc255f17026
|
3c7478d27f87a2f1a7f2c2da67beced4a76704cc
|
refs/heads/master
| 2021-06-04T18:50:42.461646
| 2016-02-24T04:15:04
| 2016-02-24T04:15:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,730
|
py
|
# encoding: utf-8
from __future__ import unicode_literals
import re
from inspect import isroutine
from ..compat import unicode
from .base import Concern, Transform, DataAttribute, Attribute
class TokenPatternAttribute(DataAttribute):
"""Lazy construction of the regular expression needed for token processing."""
def __get__(self, obj, cls=None):
# If this is class attribute (and not instance attribute) access, we return ourselves.
if obj is None:
return self
# Attempt to retrieve the cached value from the warehouse.
try:
return obj.__data__[self.__name__]
except KeyError:
pass
# No stored value? No problem! Let's calculate it.
separators = obj.separators
groups = obj.groups
quotes = obj.quotes
if groups and None not in groups:
groups = [None] + list(groups)
expression = ''.join((
# Trap possible leading space or separators.
('[\s%s]*' % (''.join(separators), )),
'(',
# Pass groups=('+','-') to handle optional leading + or -.
('[%s]%s' % (''.join([i for i in list(groups) if i is not None]), '?' if None in groups else '')) if groups else '',
# Match any amount of text (that isn't a quote) inside quotes.
''.join([(r'%s[^%s]+%s|' % (i, i, i)) for i in quotes]) if quotes else '',
# Match any amount of text that isn't whitespace.
('[^%s]+' % (''.join(separators), )),
')',
# Match possible separator character.
('[%s]*' % (''.join(separators), )),
))
value = (expression, re.compile(expression))
self.__set__(obj, value)
return value
class Token(Transform):
separators = Attribute(default=' \t')
quotes = Attribute(default="\"'")
groups = Attribute(default=[])
group = Attribute(default=None) # None or 'dict' or some other handler.
normalize = Attribute(default=None)
sort = Attribute(default=False)
cast = Attribute(default=list)
pattern = TokenPatternAttribute()
def native(self, value, context=None):
value = super(Token, self).native(value, context)
if value is None:
return None
pattern, regex = self.pattern
matches = regex.findall(value)
if isroutine(self.normalize):
matches = [self.normalize(i) for i in matches]
if self.sort:
matches.sort()
if not self.groups:
return self.cast(matches)
groups = dict([(i, list()) for i in self.groups])
if None not in groups:
groups[None] = list() # To prevent errors.
for i in matches:
if i[0] in self.groups:
groups[i[0]].append(i[1:])
else:
groups[None].append(i)
if self.group is dict:
return groups
if not self.group:
results = []
for group in self.groups:
results.extend([(group, match) for match in groups[group]])
return self.cast(results)
return self.group([[match for match in groups[group]] for group in self.groups])
def foreign(self, value, context=None):
value = super(Token, self).foreign(value, context)
if value is None:
return None
def sanatize(keyword):
if not self.quotes:
return keyword
for sep in self.separators:
if sep in keyword:
return self.quotes[0] + keyword + self.quotes[0]
return keyword
if self.group is dict:
if not isinstance(value, dict):
raise Concern("Dictionary grouped values must be passed as a dictionary.")
return self.separators[0].join([((prefix or '') + sanatize(keyword)) for prefix, keywords in sorted(list(value.items())) for keyword in sorted(value[prefix])])
if not isinstance(value, (list, tuple, set)):
raise Concern("Ungrouped values must be passed as a list, tuple, or set.")
value = [sanatize(keyword) for keyword in value]
return self.separators[0].join(sorted(value) if self.sort else value)
# A lowercase-normalized ungrouped tag set processor, returning only unique tags.
tags = Token(separators=' \t,', normalize=lambda s: s.lower().strip('"'), cast=set)
# A tag search; as per tags but grouped into a dictionary of sets for normal (None), forced inclusion (+) or exclusion (-).
tag_search = Token(separators=' \t,', normalize=lambda s: s.lower().strip('"'), cast=set, groups=['+', '-'], group=dict)
# A search keyword processor which retains quotes and groups into a dictionary of lists; no normalization is applied.
terms = Token(groups=['+', '-'], group=dict)
# VETO: Extract
'''
class DateTimeTransform(Transform):
base = Attribute(defualt=datetime.datetime)
format = "%Y-%m-%d %H:%M:%S"
def __call__(self, value):
if not value:
return ''
return super(DateTimeTransform, self)(value.strftime(self.format))
def native(self, value):
value = super(DateTimeTransform, self).native(value)
return self.base.strptime(value, self.format)
'''
|
[
"alice@gothcandy.com"
] |
alice@gothcandy.com
|
3d056240ccc91d11d0fa994fade9566d83649476
|
0ebf38d311d11f2473db301d08d906cf1a5d8825
|
/testinfra/modules/blockdevice.py
|
88e152d3b10bfe7658b43e1fe2782fb65fd3de93
|
[
"Apache-2.0",
"CC-BY-ND-4.0"
] |
permissive
|
disser/testinfra
|
5b8baf35e36192f98ca879464e858eb06029df63
|
14af900fb305991cdf2b31b8825884955e0d8f2c
|
refs/heads/master
| 2022-11-13T17:45:19.118394
| 2020-07-02T15:25:24
| 2020-07-02T15:25:24
| 276,672,242
| 0
| 0
|
Apache-2.0
| 2020-07-02T14:46:20
| 2020-07-02T14:46:20
| null |
UTF-8
|
Python
| false
| false
| 4,144
|
py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from testinfra.modules.base import Module
from testinfra.utils import cached_property
class BlockDevice(Module):
    """Information for block device.

    Should be used with sudo or under root.

    If device is not a block device, RuntimeError is raised.
    """

    def _data(self):
        # Subclasses replace this with a (cached) mapping of device attributes.
        raise NotImplementedError

    def __init__(self, device):
        self.device = device
        super().__init__()

    @property
    def is_partition(self):
        """Return True if the device is a partition.

        >>> host.block_device("/dev/sda1").is_partition
        True

        >>> host.block_device("/dev/sda").is_partition
        False
        """
        return self._data['start_sector'] > 0

    @property
    def size(self):
        """Return size if the device in bytes.

        >>> host.block_device("/dev/sda1").size
        512110190592
        """
        return self._data['size']

    @property
    def sector_size(self):
        """Return sector size for the device in bytes.

        >>> host.block_device("/dev/sda1").sector_size
        512
        """
        return self._data['sector_size']

    @property
    def block_size(self):
        """Return block size for the device in bytes.

        >>> host.block_device("/dev/sda").block_size
        4096
        """
        return self._data['block_size']

    @property
    def start_sector(self):
        """Return start sector of the device on the underlaying device.

        Usually the value is zero for full devices and is non-zero
        for partitions.

        >>> host.block_device("/dev/sda1").start_sector
        2048

        >>> host.block_device("/dev/md0").start_sector
        0
        """
        # BUG FIX: previously returned self._data['sector_size'], so this
        # property reported the sector size (e.g. 512) instead of the start
        # sector its own doctest documents.
        return self._data['start_sector']

    @property
    def is_writable(self):
        """Return True if device is writable (have no RO status)

        >>> host.block_device("/dev/sda").is_writable
        True

        >>> host.block_device("/dev/loop1").is_writable
        False
        """
        mode = self._data['rw_mode']
        if mode == 'rw':
            return True
        if mode == 'ro':
            return False
        raise ValueError('Unexpected value for rw: %s' % mode)

    @property
    def ra(self):
        """Return Read Ahead for the device in 512-bytes sectors.

        >>> host.block_device("/dev/sda").ra
        256
        """
        return self._data['read_ahead']

    @classmethod
    def get_module_class(cls, host):
        # Only Linux is implemented (via ``blockdev`` from util-linux).
        if host.system_info.type == 'linux':
            return LinuxBlockDevice
        raise NotImplementedError

    def __repr__(self):
        return '<BlockDevice(path=%s)>' % self.device
class LinuxBlockDevice(BlockDevice):
    # Gathers device attributes by parsing ``blockdev --report <device>``
    # (util-linux); the result is computed once per instance.
    @cached_property
    def _data(self):
        # Expected report header; used as a sanity check on the output format.
        header = ['RO', 'RA', 'SSZ', 'BSZ', 'StartSec', 'Size', 'Device']
        command = 'blockdev --report %s'
        blockdev = self.run(command % self.device)
        if blockdev.rc != 0 or blockdev.stderr:
            raise RuntimeError("Failed to gather data: %s" % blockdev.stderr)
        output = blockdev.stdout.splitlines()
        if len(output) < 2:
            raise RuntimeError("No data from %s" % self.device)
        if output[0].split() != header:
            raise RuntimeError('Unknown output of blockdev: %s' % output[0])
        # Second line holds the values for the requested device, in header order.
        fields = output[1].split()
        return {
            'rw_mode': str(fields[0]),
            'read_ahead': int(fields[1]),
            'sector_size': int(fields[2]),
            'block_size': int(fields[3]),
            'start_sector': int(fields[4]),
            'size': int(fields[5])
        }
|
[
"phil@philpep.org"
] |
phil@philpep.org
|
c5adb35910a3801181d1a6c8535732b8f9d6cf51
|
0facb323be8a76bb4c168641309972fa77cbecf2
|
/Configurations/HWWSemiLepHighMass/nanoAODv5/v6_production/2017/NJET_biined_WJets/SKIM10/HMVAR10_Full_SBI/MassPoints/structure_M140_mu.py
|
72b4463434ca67ffe33f216b0179abf363733d62
|
[] |
no_license
|
bhoh/SNuAnalytics
|
ef0a1ba9fa0d682834672a831739dfcfa1e7486b
|
34d1fc062e212da152faa83be50561600819df0e
|
refs/heads/master
| 2023-07-06T03:23:45.343449
| 2023-06-26T12:18:28
| 2023-06-26T12:18:28
| 242,880,298
| 0
| 1
| null | 2020-02-25T01:17:50
| 2020-02-25T01:17:49
| null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
structure["DY"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["MultiV"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["WpWmJJ_EWK_QCD_noHiggs"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["top"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["Wjets"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["vbfHWWlnuqq_M125"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["ggHWWlnuqq_M125"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["QCD_MU"]={
"isSignal" : 0,
"isData" : 0 ,
}
structure["DATA"]={
"isSignal" : 0,
"isData" : 1 ,
}
structure["ggHWWlnuqq_M140"]={
"isSignal" : 1,
"isData" : 0 ,
}
structure["vbfHWWlnuqq_M140"]={
"isSignal" : 1,
"isData" : 0 ,
}
|
[
"soarnsoar@gmail.com"
] |
soarnsoar@gmail.com
|
4c21f061d0e7cd7fcb64320a3d50b43a7c06d22e
|
bba2bd15307d94707825057fe2790a72c707a363
|
/awesome_glue/bert_classifier.py
|
3a6808b7dfbf4131497a2415fec14b8310982d9e
|
[] |
no_license
|
Xalp/dne
|
c78e8ef2f730b129623ed3eaa27f93d2cf85d6f6
|
afa519eea9ccd29332c477d89b4691fc2520813b
|
refs/heads/master
| 2023-02-16T14:27:48.089160
| 2021-01-15T12:30:44
| 2021-01-15T12:30:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,974
|
py
|
import torch
import torch.nn.functional as F
from allennlp.models import Model
from allennlp.training.metrics import CategoricalAccuracy
from transformers import AdamW
from allennlp.modules.token_embedders import PretrainedTransformerEmbedder
from allennlp.modules.seq2vec_encoders import ClsPooler
from luna import ram_globalize
class BertClassifier(Model):
def __init__(self, vocab, num_labels):
super().__init__(vocab)
self.bert_embedder = PretrainedTransformerEmbedder('bert-base-uncased')
self.pooler = ClsPooler(self.bert_embedder.get_output_dim())
self.linear = torch.nn.Sequential(
torch.nn.Dropout(0.1),
torch.nn.Linear(in_features=768, out_features=num_labels))
self.accuracy = CategoricalAccuracy()
self.loss_function = torch.nn.CrossEntropyLoss()
def forward(self, sent, label=None):
bert_embeddings = self.bert_embedder(
token_ids=sent['tokens']['token_ids'],
type_ids=sent['tokens']['type_ids'],
mask=sent['tokens']['mask'])
bert_vec = self.pooler(bert_embeddings)
logits = self.linear(bert_vec)
output = {"logits": logits, "probs": F.softmax(logits, dim=1)}
if label is not None:
self.accuracy(logits, label)
output["loss"] = self.loss_function(logits, label)
return output
def get_metrics(self, reset=False):
return {'accuracy': self.accuracy.get_metric(reset)}
def get_optimizer(self):
optimizer = AdamW(self.parameters(), lr=2e-5, eps=1e-8)
# get_linear_schedule_with_warmup(
# optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
# )
return optimizer
@ram_globalize()
def noise(tsr: torch.Tensor, scale=1.0):
    # Currently an identity pass-through: Gaussian noise injection (scaled by
    # the tensor's std) is disabled; the intended variant is kept below.
    return tsr
    # if scale == 0:
    #     return tsr
    # else:
    #     return tsr + torch.normal(0., tsr.std().item() * scale, tsr.size(), device=tsr.device)
|
[
"dugu9sword@163.com"
] |
dugu9sword@163.com
|
20c28a30e5a7a54696e106c9cce4973e2678a8dc
|
b921f8ffb559e90c2711f77dc8ceba960b721714
|
/rocket_engine/__init__.py
|
4cfbaab90c1017b1491be694e9f409501465594e
|
[
"BSD-2-Clause-Views"
] |
permissive
|
xando/django-rocket-engine
|
7b8af49d2665cd213b75b9bcc4a9ba405d63f339
|
3ef942cd0ddd2f88832725990bd0fe9bb07bbe84
|
refs/heads/master
| 2020-12-24T16:23:36.789339
| 2012-06-29T19:49:43
| 2012-06-29T19:49:43
| 3,860,429
| 1
| 0
| null | 2012-07-03T08:30:44
| 2012-03-28T22:46:21
|
Python
|
UTF-8
|
Python
| false
| false
| 2,838
|
py
|
import os
import sys
from django.core.handlers.wsgi import WSGIHandler
from django.core import signals
on_appengine_remote = os.getenv('SERVER_SOFTWARE','')\
.startswith('Google App Engine')
on_appengine = on_appengine_remote
os.path.abspath(os.curdir)
PROJECT_DIR = os.path.abspath(os.getcwd())
def get_appengine_sdk_path():
typical_sdk_paths = [
os.environ.get('APP_ENGINE_SDK',""),
'/usr/local/google_appengine',
'/Applications/GoogleAppEngineLauncher.app/Contents/Resources/GoogleAppEngine-default.bundle/Contents/Resources/google_appengine'
] + os.environ.get('PATH', '').split(os.pathsep)
# List of files which will be used as a test for SQK lookup.
is_appengine_sdk = lambda path: all([
x in os.listdir(path) for x in [
'appcfg.py',
'dev_appserver.py',
'google'
]
])
for path in typical_sdk_paths:
if os.path.exists(path) and is_appengine_sdk(path):
return path
sys.stderr.write(
'The Google App Engine SDK could not be found!\n'
"Make sure it's accessible via your PATH "
"environment and called google_appengine.\n"
)
sys.exit(1)
def setup_appendine_sdk():
try:
import dev_appserver
except ImportError:
sdk_path = get_appengine_sdk_path()
sys.path.append(sdk_path)
import dev_appserver
sys.path.extend(dev_appserver.EXTRA_PATHS)
sys.path.extend(dev_appserver.GOOGLE_SQL_EXTRA_PATHS)
def path_appendine_sdk():
    """Patch the runtime environment for Django on App Engine.

    Locally, it relaxes the dev_appserver sandbox so sqlite/os work;
    remotely, it hooks exception logging and adds the bundled libraries.
    Relies on the module-level `on_appengine_remote` flag and `PROJECT_DIR`.
    """
    if not os.environ.get('DJANGO_SETTINGS_MODULE'):
        # Default Django settings module when the caller didn't set one.
        os.environ.update({'DJANGO_SETTINGS_MODULE': 'settings'})

    if not on_appengine_remote:
        # Local dev server: add SQLite (and friends) to the sandbox's
        # allowed C modules by patching dev_appserver internals.
        # NOTE(review): these are private SDK attributes -- fragile across
        # SDK versions; confirm against the pinned SDK release.
        from google.appengine.tools import dev_appserver
        from google.appengine import dist27
        dist27.MODULE_OVERRIDES = []
        dev_appserver.HardenedModulesHook._WHITE_LIST_C_MODULES.extend(
            ('parser', '_ssl', '_io', '_sqlite3', 'os', '_os', 'tempfile'))
        # Expose the real `os` module instead of the sandboxed stub.
        dev_appserver.HardenedModulesHook._MODULE_OVERRIDES['os'] = os.__dict__
        dev_appserver.HardenedModulesHook._PY27_ALLOWED_MODULES.append('os')
        dev_appserver.HardenedModulesHook._HardenedModulesHook__PY27_OPTIONAL_ALLOWED_MODULES = {}
        # Disable the dev server's file-access restrictions entirely.
        dev_appserver.FakeFile.NOT_ALLOWED_DIRS = set([])
        dev_appserver.FakeFile.IsFileAccessible = staticmethod(
            lambda *args, **kwargs: True
        )
    else:
        # Production: log unhandled request exceptions...
        from .utils import log_traceback
        signals.got_request_exception.connect(log_traceback)
        # ...and add the vendored third-party libraries to sys.path.
        import site
        site.addsitedir(os.path.join(PROJECT_DIR, 'appengine_libs'))

# Module import side effects: patch the environment, then build the WSGI app.
if not on_appengine_remote:
    setup_appendine_sdk()
path_appendine_sdk()

wsgi = WSGIHandler()
|
[
"sebastian.pawlus@gmail.com"
] |
sebastian.pawlus@gmail.com
|
71dab8b1dc580f2d08efed954c2be67f8bdb700e
|
72b1d8b44520d1757d379d8013eb3912b005bef3
|
/ml/visualizations/word_cloud/demo_cloud.py
|
85787dfd91e13e926b6a51745f2ce8dd67c2ce84
|
[] |
no_license
|
joshuaNewman10/ml
|
14d8d5821bd952e77272b740cf05cef69ebee383
|
3ec43868004d421814f8e056205e77a2b8cb92dc
|
refs/heads/master
| 2021-04-03T06:29:33.655495
| 2018-09-17T19:03:40
| 2018-09-17T19:03:40
| 124,795,304
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 757
|
py
|
import matplotlib.pyplot as plt
from argparse import ArgumentParser
from wordcloud import WordCloud, STOPWORDS
def main(text_file_path):
    """Render two word clouds for *text_file_path*: one with default sizing,
    one with the font size capped at 40, then show both figures."""
    print('hey')  # NOTE(review): looks like leftover debug output -- confirm/remove
    text = open(text_file_path).read()

    # Generate a word cloud image
    wordcloud = WordCloud(stopwords=STOPWORDS).generate(text)
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis("off")

    # Same text again, but with a lower max_font_size, on a second figure.
    wordcloud = WordCloud(max_font_size=40, stopwords=STOPWORDS).generate(text)
    plt.figure()
    plt.imshow(wordcloud, interpolation="bilinear")
    plt.axis("off")
    # Blocks until the figure windows are closed.
    plt.show()


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--text_file_path', required=True)
    args = parser.parse_args()
    main(args.text_file_path)
|
[
"josh@teambanjo.com"
] |
josh@teambanjo.com
|
b9682dd26ca433882646316da95ace105a4ee492
|
c6382b3f6778edd5a64bfb2a4d22ff6e5e5c0f7d
|
/ipyparallel/client/_joblib.py
|
7098a73b1bfb54d9cd43aa248681c69d35e554a3
|
[
"BSD-3-Clause"
] |
permissive
|
195610087/ipyparallel
|
17f382498bad28b339a3ca1f0b479151e9c0c955
|
6cd55b00a520b3f299e7db88a08b78dcbe713af8
|
refs/heads/main
| 2023-09-06T07:29:36.302391
| 2021-11-15T10:01:57
| 2021-11-15T10:01:57
| 429,255,340
| 0
| 0
|
NOASSERTION
| 2021-11-18T01:24:20
| 2021-11-18T01:24:20
| null |
UTF-8
|
Python
| false
| false
| 2,308
|
py
|
"""joblib parallel backend for IPython Parallel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from joblib.parallel import AutoBatchingMixin
from joblib.parallel import ParallelBackendBase
import ipyparallel as ipp
class IPythonParallelBackend(AutoBatchingMixin, ParallelBackendBase):
    """joblib backend that runs tasks on an IPython Parallel LoadBalancedView.

    If no *view* is supplied, the default cluster is loaded from file (or a
    new one is started), a client is connected, and a load-balanced view is
    created.  Ownership flags record what this backend created itself so
    `terminate()` only tears down its own resources.
    """

    def __init__(self, view=None, **kwargs):
        super().__init__(**kwargs)
        # What terminate() is responsible for closing/stopping.
        self._cluster_owner = False
        self._client_owner = False
        if view is None:
            self._client_owner = True
            try:
                # load the default cluster
                cluster = ipp.Cluster.from_file()
            except FileNotFoundError:
                # No cluster file yet: start a fresh cluster we then own.
                # (other load errors?)
                cluster = self._cluster = ipp.Cluster()
                self._cluster_owner = True
                cluster.start_cluster_sync()
            else:
                # cluster running, ensure some engines are, too
                if not cluster.engines:
                    cluster.start_engines_sync()
            rc = cluster.connect_client_sync()
            rc.wait_for_engines(cluster.n or 1)
            view = rc.load_balanced_view()
            # use cloudpickle or dill for closures, if available.
            # joblib tends to create closures default pickle can't handle.
            try:
                import cloudpickle  # noqa
            except ImportError:
                try:
                    import dill  # noqa
                except ImportError:
                    pass
                else:
                    view.client[:].use_dill()
            else:
                view.client[:].use_cloudpickle()
        self._view = view

    def effective_n_jobs(self, n_jobs):
        """A View can run len(view) jobs at a time"""
        return len(self._view)

    def terminate(self):
        """Close the client if we created it, and stop the cluster if we own it."""
        if self._client_owner:
            self._view.client.close()
        if self._cluster_owner:
            self._cluster.stop_cluster_sync()

    def apply_async(self, func, callback=None):
        """Schedule *func* on the view; return its future.

        joblib's callback expects the result value, so the future's
        done-callback unwraps it with f.result().
        """
        future = self._view.apply_async(func)
        if callback:
            future.add_done_callback(lambda f: callback(f.result()))
        return future
|
[
"benjaminrk@gmail.com"
] |
benjaminrk@gmail.com
|
24a9abcd14ccf38aa4edade81d64a646ca06c078
|
282ec49f8ce8aa176c24e4f13a8852c9b0752e4a
|
/jumble/gtkmm/SConstruct
|
bdf8811058ae5ba72e9070f2535846020b8fdf25
|
[] |
no_license
|
montreal91/workshop
|
b118b9358094f91defdae1d11ff8a1553d67cee6
|
8c05e15417e99d7236744fe9f960f4d6b09e4e31
|
refs/heads/master
| 2023-05-22T00:26:09.170584
| 2023-01-28T12:41:08
| 2023-01-28T12:41:08
| 40,283,198
| 3
| 1
| null | 2023-05-01T20:19:11
| 2015-08-06T03:53:44
|
C++
|
UTF-8
|
Python
| false
| false
| 238
|
# SCons build script for a minimal gtkmm-3.0 "hello world" program.
env = Environment()
# Pull the gtkmm compile/link flags straight from pkg-config.
env.ParseConfig("pkg-config --cflags --libs gtkmm-3.0")

sources = [
    "main.cc",
    "helloworld.cc"
]

flags = [
    "-std=c++11",
    "-Wall",
]

env.Program(target="gtk-hello-1", CXXFLAGS=flags, source=sources)
|
[
"nefedov.alexander91@yandex.ru"
] |
nefedov.alexander91@yandex.ru
|
|
1f4d2eb377e64e487faba3cdf2c21c6ecabc8bbe
|
775f887ab0933c8bb9263febceb702974966bb48
|
/packages/pyright-internal/src/tests/samples/genericTypes46.py
|
1395effc3f8c0f8d6f844f6afd1e87a98e644aac
|
[
"LicenseRef-scancode-generic-cla",
"MIT"
] |
permissive
|
isabella232/pyright
|
160a4d9ce366cb61946949f9d5aebe7457539c67
|
a192486099503353413e02078c41d0d82bd696e8
|
refs/heads/master
| 2023-03-13T05:04:51.852745
| 2021-03-03T07:51:18
| 2021-03-03T07:51:18
| 344,101,663
| 0
| 0
|
NOASSERTION
| 2021-03-03T11:24:10
| 2021-03-03T11:21:38
| null |
UTF-8
|
Python
| false
| false
| 413
|
py
|
# This sample tests the assignment of constrained TypeVars to a union
# that allows for all of the types in the constraint.
# (Type-checker test fixture: the error comment below is an expected marker,
# not a defect to fix.)

from typing import TypeVar, Union


def func(a: Union[int, float]):
    ...


# Every constraint of _T1 is a member of the union func accepts, so the
# pass-through is legal.
_T1 = TypeVar("_T1", int, float)


def func1(a: _T1, b: _T1):
    return func(a)


# _T2 additionally allows complex, which func's union does not accept.
_T2 = TypeVar("_T2", int, float, complex)


def func2(a: _T2, b: _T2):
    # This should generate an error.
    return func(a)
|
[
"erictr@microsoft.com"
] |
erictr@microsoft.com
|
29e734cef324fc14d43b25079376467dbf6b876d
|
41523dd4871e8ed1043d2b3ddf73417fcbdde209
|
/day16/zuoye.py
|
3a03ad98b7fd77f925c21ebaa2e63183f5f27041
|
[] |
no_license
|
WayneChen1994/Python1805
|
2aa1c611f8902b8373b8c9a4e06354c25f8826d6
|
a168cd3b7749afc326ec4326db413378fd3677d5
|
refs/heads/master
| 2020-03-30T23:19:00.773288
| 2018-11-02T10:47:40
| 2018-11-02T10:47:40
| 151,697,105
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,299
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# author:Wayne
'''
使用while循环遍历list,tuple,dict,set
使用try……except……
'''
from typing import Iterable
# 传入的参数若是一个可迭代类型的对象,则将其遍历打印
def printIterByWhile(obj):
    """Print every element of *obj* using an explicit iterator in a while loop.

    Dicts are iterated as (key, value) pairs; a non-iterable argument only
    prints an error message.  StopIteration is caught to end the loop.
    """
    # Guard clause: bail out early on non-iterables.
    if not isinstance(obj, Iterable):
        print("所给参数不是可迭代类型")
        return
    # Dicts need key AND value, so iterate their items view.
    it = iter(obj.items()) if isinstance(obj, dict) else iter(obj)
    while True:
        try:
            print(next(it))
        except StopIteration:
            # Iterator exhausted -- traversal is complete.
            return
if __name__ == "__main__":
    # Exercise printIterByWhile with each common iterable type, plus a
    # non-iterable int to hit the error branch.
    alist = [x for x in range(39)]
    atuple = (1, 2, 3, 4, 5, 6, 7)
    aset = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
    adict = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}
    astr = "qwertyuiopasdfghjklzxcvbnm"
    printIterByWhile(alist)
    printIterByWhile(atuple)
    printIterByWhile(aset)
    printIterByWhile(adict)
    printIterByWhile(astr)
    printIterByWhile(123)
|
[
"waynechen1994@163.com"
] |
waynechen1994@163.com
|
06be33f39bb529376eefc5bb5e39140cf58c3760
|
0503295fd59e32bfda7f8fdf4f73a89217ad00bb
|
/non_geometry_features.py
|
8b117dc1282a361a9ad94a1ea4973b8525367bad
|
[] |
no_license
|
xinyu1905/Nomad2018
|
f00e884c8f14bf1fcc4cf675a757b59a7f0dd3e8
|
864a2571f29e74821dbe6220a0143cdf97fac27c
|
refs/heads/master
| 2020-03-19T01:22:23.514215
| 2018-02-01T19:00:37
| 2018-02-01T19:00:37
| 135,540,432
| 0
| 1
| null | 2018-05-31T06:18:26
| 2018-05-31T06:18:26
| null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
import logging
import numpy as np
import global_flags_constanst as gfc
# Module-level logger writing to stderr with a timestamped format; verbosity
# is controlled centrally through gfc.LOGGING_LEVEL.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter("%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(gfc.LOGGING_LEVEL)
def add_number_of_symmetries(space_group_feature):
    """Return an (n, 1) array with the symmetry count for each space group.

    Each entry of *space_group_feature* is cast to int and looked up in
    gfc.SPACE_GROUP_PROPERTIES; every lookup is logged at INFO level.
    """
    count = len(space_group_feature)
    symmetries = np.zeros((count, 1))
    for idx, raw_group in enumerate(space_group_feature):
        sg = int(raw_group)
        n_sym = gfc.SPACE_GROUP_PROPERTIES[sg]
        logger.info("space group: {0}; number of symmetries: {1}".format(sg,
                                                                         n_sym))
        symmetries[idx] = n_sym
    return symmetries
if __name__ == "__main__":
    # Column 0 holds the sample id, column 1 the space group number
    # (presumed from the variable names -- confirm against the CSV header).
    data = np.loadtxt("train.csv", delimiter=",", skiprows=1)
    test_data = np.loadtxt("test.csv", delimiter=",", skiprows=1)

    ids = data[:, 0].reshape(-1, 1)
    space_group_feature = data[:, 1]

    test_ids = test_data[:, 0].reshape(-1, 1)
    test_space_group_feature = test_data[:, 1]

    symmetries_data = add_number_of_symmetries(space_group_feature)
    test_symmetries_data = add_number_of_symmetries(test_space_group_feature)

    # Prepend the ids so the derived feature files line up with the originals.
    symmetries_data = np.hstack((ids, symmetries_data))
    np.savetxt("train_symmetries_data.csv", symmetries_data, delimiter=",")

    test_symmetries_data = np.hstack((test_ids, test_symmetries_data))
    np.savetxt("test_symmetries_data.csv", test_symmetries_data, delimiter=",")
|
[
"dawid.dul@gmail.com"
] |
dawid.dul@gmail.com
|
ab002aea2d27eadf5bcf53b4f0a3367f06297f7f
|
b977a59c246230cfccf40a4b57283ab5bc278770
|
/pyBN/learning/structure/constraint/fast_iamb.py
|
c3d930b055105dcd1195e8b083920c3948e5b649
|
[] |
no_license
|
baturayo/edbn
|
6f5c92b413121ededa461afd537a88f965f2af1d
|
4b3d3e2e97c1a35908c0237e5aac60e85d75a001
|
refs/heads/master
| 2020-04-29T17:13:36.986454
| 2019-03-25T16:58:32
| 2019-03-25T16:58:32
| 176,291,155
| 0
| 0
| null | 2019-03-18T13:23:44
| 2019-03-18T13:23:43
| null |
UTF-8
|
Python
| false
| false
| 4,324
|
py
|
"""
*********
Fast-IAMB
Algorithm
*********
For Feature Selection (from [1]):
"A principled solution to the feature selection problem is
to determine a subset of attributes that can "shield" (render
independent) the attribute of interest from the effect of
the remaining attributes in the domain. Koller and Sahami
[4] first showed that the Markov blanket of a given target attribute
is the theoretically optimal set of attributes to predict
its value...
Because the Markov blanket of a target attribute T renders
it statistically independent from all the remaining attributes
(see the Markov blanket definition below), all information
that may influence its value is stored in the values
of the attributes of its Markov blanket. Any attribute
from the feature set outside its Markov blanket can be effectively
ignored from the feature set without adversely affecting
the performance of any classifier that predicts the
value of T"
References
----------
[1] Yaramakala and Maragritis, "Speculative Markov Blanket
Discovery for Optimal Feature Selection"
[2] Tsarmardinos, et al. "Algorithms for Large Scale
Markov Blanket Discovery"
"""
from __future__ import division
import numpy as np
from pyBN.utils.data import unique_bins, replace_strings
from pyBN.utils.independence_tests import are_independent, mi_test
def fast_iamb(data, k=5, alpha=0.05, feature_selection=None, debug=False):
    """
    Fast-IAMB Markov blanket induction (Yaramakala & Margaritis).

    "A novel algorithm for the induction of Markov blankets from data,
    called Fast-IAMB, that employs a heuristic to quickly recover the
    Markov blanket."

    Arguments
    ---------
    *data* : a nested numpy array
    *k* : an integer
        The minimum number of samples required per contingency-table cell
        before another variable may be added during the grow phase.
    *alpha* : a float
        Probability of Type I error
    *feature_selection* : None or a single variable index.
        When given, only that variable's Markov blanket is computed and
        returned (no BayesNet is built).
    *debug* : a bool, print progress information.

    Returns
    -------
    *bn* : a BayesNet object, or the Markov blanket (a list) when
    *feature_selection* is given.

    Fixes relative to the original (which was documented as not working):
    - grow-phase data-sufficiency test now divides N by the full product of
      cardinalities (the original multiplied due to a precedence bug)
    - candidate set S keeps variables that are DEPENDENT on T (the original
      removed them -- inverted condition -- while also mutating the set
      during iteration, a RuntimeError in Python 3)
    - shrink phase iterates over a copy of Mb[T] before removing from it
    - `return BN` NameError corrected to `return bn`
    - `return Mb[_T]` (list used as dict key, a TypeError) corrected to
      `return Mb[feature_selection]`
    """
    # Map each column index to its list of observed values (used by BayesNet).
    value_dict = dict(zip(range(data.shape[1]),
                          [list(np.unique(col)) for col in data.T]))
    # Replace string categories with integer codes.
    data = replace_strings(data)

    n_rv = data.shape[1]
    Mb = dict([(rv, []) for rv in range(n_rv)])
    N = data.shape[0]
    card = dict(zip(range(n_rv), unique_bins(data)))

    if feature_selection is None:
        _T = range(n_rv)
    else:
        assert not isinstance(feature_selection, list), 'feature_selection must be only one value'
        _T = [feature_selection]

    # LEARN MARKOV BLANKET
    for T in _T:
        # Candidate set: every other variable NOT marginally independent of T.
        S = {A for A in range(n_rv)
             if A != T and not are_independent(data[:, (A, T)])}

        while S:
            insufficient_data = False

            #### GROW PHASE ####
            # Conditional mutual information of each candidate with T given Mb[T].
            mi_dict = dict([(s, mi_test(data[:, (s, T) + tuple(Mb[T])])) for s in S])
            for x_i in sorted(mi_dict, key=mi_dict.get, reverse=True):
                # Add top MI-score variables while there are at least k
                # samples per contingency-table cell.
                if N / (card[x_i] * card[T] * np.prod([card[b] for b in Mb[T]])) >= k:
                    Mb[T].append(x_i)
                else:
                    insufficient_data = True
                    break

            #### SHRINK PHASE ####
            removed_vars = False
            for A in list(Mb[T]):  # copy: Mb[T] is mutated inside the loop
                cols = (A, T) + tuple(set(Mb[T]) - {A})
                # If A is independent of T given the rest of Mb[T], drop it.
                if are_independent(data[:, cols]):
                    Mb[T].remove(A)
                    removed_vars = True

            #### FINALIZE BLANKET FOR "T" OR MAKE ANOTHER PASS ####
            if insufficient_data and not removed_vars:
                if debug:
                    print('Breaking..')
                break
            else:
                # Rebuild the candidate set from variables outside {T} U Mb[T]
                # that are still dependent on T given the current blanket.
                # NOTE(review): the original added *independent* variables
                # here; kept as-is pending verification against [1].
                A = set(range(n_rv)) - {T} - set(Mb[T])
                S = set()
                for a in A:
                    cols = (a, T) + tuple(Mb[T])
                    if are_independent(data[:, cols]):
                        S.add(a)

        if debug:
            print('Done with %s' % T)

    if feature_selection is None:
        # RESOLVE GRAPH STRUCTURE
        edge_dict = resolve_markov_blanket(Mb, data)
        # ORIENT EDGES
        oriented_edge_dict = orient_edges_MB(edge_dict, Mb, data, alpha)
        # CREATE BAYESNET OBJECT
        bn = BayesNet(oriented_edge_dict, value_dict)
        return bn
    else:
        return Mb[feature_selection]
|
[
"stephen.pauwels@uantwerpen.be"
] |
stephen.pauwels@uantwerpen.be
|
3eecd1395fded62f776353bc6b65ee0e4ea3aadd
|
d0530d181fac2d5a1d04afaee1976ab8d74ed7dd
|
/argparse_demo.py
|
ea4313e8b581ccc365568dd6fc6d44333ed3bf61
|
[] |
no_license
|
guanguanboy/testPython
|
b587712c12f9a769872bebfe9eabcd4fca486690
|
4dbab2057a2b0264c3e38374283188d4fffc804f
|
refs/heads/master
| 2023-05-16T00:18:52.795866
| 2023-05-09T06:56:27
| 2023-05-09T06:56:27
| 337,029,277
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,010
|
py
|
"""
https://www.youtube.com/watch?v=q94B9n_2nf0
"""
import argparse
def fib(n):
    """Return the n-th Fibonacci number, with fib(0) == 0 and fib(1) == 1."""
    prev, curr = 0, 1
    for _ in range(n):
        prev, curr = curr, prev + curr
    return prev
def Main():
    """Parse the command line, compute a Fibonacci number, and report it.

    Verbosity is controlled by mutually exclusive -v/-q flags; -o appends
    the result to fibonacci.txt.
    """
    arg_parser = argparse.ArgumentParser()
    verbosity = arg_parser.add_mutually_exclusive_group()
    verbosity.add_argument("-v", "--verbose", action="store_true")
    verbosity.add_argument("-q", "--quiet", action="store_true")
    arg_parser.add_argument("num", help="The fibonacci number you wish to calculate.", type=int)
    arg_parser.add_argument("-o", "--output", help="Output result to a file.", action="store_true")
    opts = arg_parser.parse_args()

    value = fib(opts.num)
    if opts.verbose:
        print("The " + str(opts.num) + "th fib number is " + str(value))
    elif opts.quiet:
        print(value)
    else:
        print("Fib(" + str(opts.num) +") = " + str(value))

    if opts.output:
        out_file = open("fibonacci.txt", "a")
        out_file.write(str(value) + '\n')


if __name__ == '__main__':
    Main()
|
[
"517445163@qq.com"
] |
517445163@qq.com
|
b5cdd6986c2acbfb06b88e72bf32735d0a8eb004
|
2eb386991d9975f0f8440d90de26e950304ac42f
|
/HackTM2020/count_on_me/aes.py
|
1e77d5ba471ace032b7e2f0397b4b6c18787dae7
|
[] |
no_license
|
Quintec/CTFs2020
|
2816a66e8a486537c31e5ac25253840bc3a8ffe9
|
bdaa327c9f0b0ee16ff95bafcaf65f0df8acd8b9
|
refs/heads/master
| 2022-12-19T21:39:14.129702
| 2020-10-01T16:49:06
| 2020-10-01T16:49:06
| 281,812,929
| 1
| 0
| null | 2020-10-01T16:49:08
| 2020-07-23T00:37:44
| null |
UTF-8
|
Python
| false
| false
| 422
|
py
|
from Crypto.Cipher import AES

# this is a demo of the encryption / decryption process.
# NOTE(review): Python 2 only -- str.decode('hex') / str.encode('hex') were
# removed in Python 3 (use bytes.fromhex / binascii there).

a = 'flagflagflagflag'  # exactly one 16-byte AES block, so no padding needed
# 64 hex chars -> 32-byte key (AES-256)
key = '1111111111111111111111111111111111111111111111111111111111111111'.decode('hex')
# 32 hex chars -> 16-byte IV
iv = '42042042042042042042042042042042'.decode('hex')

# encrypt
aes = AES.new(key,AES.MODE_CBC, iv)
c = aes.encrypt(a).encode("hex")
print(c)

# decrypt -- a fresh cipher object is needed because CBC mode is stateful
aes = AES.new(key,AES.MODE_CBC, iv)
print(aes.decrypt(c.decode("hex")))
|
[
"zhongbrothers@gmail.com"
] |
zhongbrothers@gmail.com
|
052adf70d0033e9a2f09e7ada0b84bae66757da5
|
51f887286aa3bd2c3dbe4c616ad306ce08976441
|
/pybind/slxos/v17r_2_00/routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/nud/__init__.py
|
80a5b33532ac15c901e09a6277bcecb1a0451e96
|
[
"Apache-2.0"
] |
permissive
|
b2220333/pybind
|
a8c06460fd66a97a78c243bf144488eb88d7732a
|
44c467e71b2b425be63867aba6e6fa28b2cfe7fb
|
refs/heads/master
| 2020-03-18T09:09:29.574226
| 2018-04-03T20:09:50
| 2018-04-03T20:09:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,074
|
py
|
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import retry
class nud(PybindBase):
  """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module brocade-common-def - based on the path /routing-system/interface/ve/ipv6/ipv6-nd-ra/ipv6-intf-cmds/nd/nud. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  Auto-generated pyangbind container; edit the YANG source, not this file.
  """
  __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__retry',)

  _yang_name = 'nud'
  _rest_name = 'nud'

  _pybind_generated_by = 'container'

  def __init__(self, *args, **kwargs):
    # Resolve the XPath helper: explicit kwarg > parent's helper > disabled.
    path_helper_ = kwargs.pop("path_helper", None)
    if path_helper_ is False:
      self._path_helper = False
    elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
      self._path_helper = path_helper_
    elif hasattr(self, "_parent"):
      path_helper_ = getattr(self._parent, "_path_helper", False)
      self._path_helper = path_helper_
    else:
      self._path_helper = False

    # Resolve extmethods the same way: explicit kwarg > parent's > disabled.
    extmethods = kwargs.pop("extmethods", None)
    if extmethods is False:
      self._extmethods = False
    elif extmethods is not None and isinstance(extmethods, dict):
      self._extmethods = extmethods
    elif hasattr(self, "_parent"):
      extmethods = getattr(self._parent, "_extmethods", None)
      self._extmethods = extmethods
    else:
      self._extmethods = False
    # Child container for the YANG 'retry' node.
    self.__retry = YANGDynClass(base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)

    load = kwargs.pop("load", None)
    if args:
      # Copy-constructor path: accept one object carrying the same elements.
      if len(args) > 1:
        raise TypeError("cannot create a YANG container with >1 argument")
      all_attr = True
      for e in self._pyangbind_elements:
        if not hasattr(args[0], e):
          all_attr = False
          break
      if not all_attr:
        raise ValueError("Supplied object did not have the correct attributes")
      for e in self._pyangbind_elements:
        nobj = getattr(args[0], e)
        # Only copy elements the source object actually changed.
        if nobj._changed() is False:
          continue
        setmethod = getattr(self, "_set_%s" % e)
        if load is None:
          setmethod(getattr(args[0], e))
        else:
          setmethod(getattr(args[0], e), load=load)

  def _path(self):
    """Return this node's YANG path as a list of element names."""
    if hasattr(self, "_parent"):
      return self._parent._path()+[self._yang_name]
    else:
      return [u'routing-system', u'interface', u've', u'ipv6', u'ipv6-nd-ra', u'ipv6-intf-cmds', u'nd', u'nud']

  def _rest_path(self):
    """Return this node's REST path; empty rest names are skipped."""
    if hasattr(self, "_parent"):
      if self._rest_name:
        return self._parent._rest_path()+[self._rest_name]
      else:
        return self._parent._rest_path()
    else:
      return [u'interface', u'Ve', u'ipv6', u'nd', u'nud']

  def _get_retry(self):
    """
    Getter method for retry, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/nud/retry (container)
    """
    return self.__retry

  def _set_retry(self, v, load=False):
    """
    Setter method for retry, mapped from YANG variable /routing_system/interface/ve/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/nud/retry (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_retry is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_retry() directly.
    """
    if hasattr(v, "_utype"):
      # Unwrap a typed value back to its underlying type before re-wrapping.
      v = v._utype(v)
    try:
      t = YANGDynClass(v,base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)
    except (TypeError, ValueError):
      raise ValueError({
          'error-string': """retry must be of a type compatible with container""",
          'defined-type': "container",
          'generated-type': """YANGDynClass(base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)""",
        })

    self.__retry = t
    if hasattr(self, '_set'):
      self._set()

  def _unset_retry(self):
    # Reset 'retry' to a fresh default container.
    self.__retry = YANGDynClass(base=retry.retry, is_container='container', presence=False, yang_name="retry", rest_name="retry", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'retry attempts', u'cli-compact-syntax': None, u'cli-sequence-commands': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='container', is_config=True)

  retry = __builtin__.property(_get_retry, _set_retry)

  _pyangbind_elements = {'retry': retry, }
|
[
"badaniya@brocade.com"
] |
badaniya@brocade.com
|
cb1cfa2061ba0203e359d8a494bb3e4129ab3d99
|
e3178ef1fcc2f11b2608881b11a2545e1b830804
|
/pyfurby/restless.py
|
c9ce40bb815cf5cc596269aa2d9055b8a3a6678d
|
[
"MIT"
] |
permissive
|
matteoferla/pyfurby
|
2dd5b59b88e90777e693b3fd121981c4629a6e54
|
1383b93b432f09ac4fdd07562dedf66509b5809d
|
refs/heads/main
| 2023-03-08T16:54:19.393819
| 2021-02-20T19:36:13
| 2021-02-20T19:36:13
| 330,163,453
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
from typing import get_type_hints
import pydoc
class RestlessFurby: # restful...
    """Mixin exposing the object's public methods over a small HTTP API.

    `restful()` serves two routes: `/` renders a help page, and `/<cmd>`
    invokes the method named *cmd* with the URL query parameters as
    keyword arguments.
    """

    def _resolve_request(self, cmd):
        """
        Restful API Mode.
        This action is trigged when any route that is not home is requested
        :param cmd: method name taken from the URL path
        :return: a dict with 'status' (and 'error' on failure)
        """
        try:
            from flask import Flask, request
            kwargs = request.args
            print(f'Request {cmd}: {kwargs} from {request.remote_addr}')
            getattr(self, cmd)(**kwargs)
            return {'status': 'OK'}
        except Exception as error:
            # Report the failure to the caller instead of crashing the server.
            return {'status': 'error',
                    'error': f'{error.__class__.__name__}: {error}'}

    def _home(self):
        """
        Restful API Mode.
        This action is trigged when home is requested: builds a markdown
        help page listing every public attribute with its type hints and
        docstring.
        """
        reply = '## Furby Restful API options\n\n'
        reply += 'To trigger a command, say `furby.yell`, use 198.162.1/0.xx:1998/yell?text=I%20hate%20you ' +\
                 'where xx is the furby\'s netword address\n'
        reply += 'Namely, the route (part before the question mark) is the command, and its arguments are ' +\
                 'key=value separated by an ampersand (that is a URL query).\n' +\
                 'Using Pythod requests, just submit it as a dictionary\n'
        # Public attributes only: names that do not start with an underscore.
        for k in [k for k in dir(self) if k.find('_') != 0]:
            attribute = getattr(self, k)
            reply += f'###{k}\n>{get_type_hints(attribute)}\n{pydoc.getdoc(attribute)}\n\n'
        return reply

    def restful(self):
        """
        The furby listens on port 1998, the year the Furby was introduced (Nawww).
        The app is served via waitress on all interfaces; do not expose it
        directly to the internet.
        :return:
        """
        from flask import Flask
        import waitress
        app = Flask(__name__)
        app.add_url_rule('/<cmd>', 'command', self._resolve_request)
        app.add_url_rule('/', 'home', self._home)
        waitress.serve(app, port=1998, host='0.0.0.0')
|
[
"matteo.ferla@gmail.com"
] |
matteo.ferla@gmail.com
|
f731a090e91a2638b256e73ffab2478b8bd0d195
|
b6472217400cfce4d12e50a06cd5cfc9e4deee1f
|
/sites/top/api/rest/FenxiaoProductImageDeleteRequest.py
|
b41002248a940d2d3ed32f64a24e4503466cfdc4
|
[] |
no_license
|
topwinner/topwinner
|
2d76cab853b481a4963826b6253f3fb0e578a51b
|
83c996b898cf5cfe6c862c9adb76a3d6a581f164
|
refs/heads/master
| 2021-01-22T22:50:09.653079
| 2012-08-26T19:11:16
| 2012-08-26T19:11:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
'''
Created by auto_sdk on 2012-08-26 16:43:44
'''
from top.api.base import RestApi
class FenxiaoProductImageDeleteRequest(RestApi):
	"""Auto-generated TOP request for the taobao.fenxiao.product.image.delete API.

	Attributes initialised to None are the request parameters
	(presumably: image position, distribution product id, and sku
	properties -- confirm against the TOP API docs).
	"""

	def __init__(self,domain,port):
		RestApi.__init__(self,domain, port)
		self.position = None
		self.product_id = None
		self.properties = None

	def getapiname(self):
		# API method name used by the RestApi base when dispatching the call.
		return 'taobao.fenxiao.product.image.delete'
|
[
"timo.jiang@qq.com"
] |
timo.jiang@qq.com
|
6c67af0b7b41e76d5d8eb85986cbe08b3eb3aaac
|
7233ff4c7cbb5d0f5e43a44800d0edddc2793b84
|
/Players/Combine2.py
|
682b4aa0bfe9b56f303869c63e5474dad35b2629
|
[] |
no_license
|
SoumitraAgarwal/Webscraping-Tennis-Grand
|
b3d97be13e67b285aa1303815ee58e5c693fa5df
|
45d0023e7c20ebcb230827f4a89c2669fcaee6fd
|
refs/heads/master
| 2021-01-02T08:23:28.373915
| 2017-09-30T23:02:44
| 2017-09-30T23:02:44
| 98,999,991
| 5
| 2
| null | 2017-09-11T21:32:45
| 2017-08-01T12:40:51
|
HTML
|
UTF-8
|
Python
| false
| false
| 506
|
py
|
import numpy as np
import cv2
import os

# Blend groups of frames from Pictures2/ into composite images: one output
# file per group of 50 inputs.
base = 'Pictures2/'
# NOTE(review): os.listdir order is arbitrary -- sorted() may be intended
# if the frames must be blended in sequence.
images = os.listdir(base)

for j in range(0,len(images), 50):
	output = cv2.imread(base+images[j])
	image1 = cv2.imread(base+images[j + 1])
	# Seed the accumulator with a faint (1/100-weight) blend of frames j, j+1.
	cv2.addWeighted(image1, 1.0/100, output, 1.0/100, 0, output)
	for i in range(j + 2,min(j + 100, len(images))):
		# load the image
		image1 = cv2.imread(base+images[i])
		# Accumulate each later frame at a small weight on top of `output`.
		cv2.addWeighted(image1, 1.0/min(100, len(images) - j), output, 1, 0, output)
	cv2.imwrite("OutputComb" + str(j) + ".jpg", output)
|
[
"agarwalsoumitra1504@gmail.com"
] |
agarwalsoumitra1504@gmail.com
|
b4633535520e64b12b41f1fcd1ab0e4c8e4330b6
|
41581af29553c967f1c3bfcce4cbb45128093b22
|
/problems/mrna/mrna.py
|
e9fb08aa812a183bbac2f027437a528d3b708673
|
[] |
no_license
|
pratishhegde/rosalind
|
818105992dd6169a09b737e5ec171a52cb58ccd8
|
bd06ae574d08f1a8a34c79d7da3ae8b10cb63dbe
|
refs/heads/master
| 2021-01-16T18:23:12.557850
| 2014-05-27T21:49:56
| 2014-05-27T21:49:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,515
|
py
|
# -*- coding: utf-8 -*-
"""
Problem
For positive integers a and n, a modulo n (written amodn in shorthand) is the
remainder when a is divided by n. For example, 29mod11=7 because 29=11×2+7.
Modular arithmetic is the study of addition, subtraction, multiplication, and
division with respect to the modulo operation. We say that a and b are congruent
modulo n if amodn=bmodn; in this case, we use the notation a≡bmodn.
Two useful facts in modular arithmetic are that if a≡bmodn and c≡dmodn, then
a+c≡b+dmodn and a×c≡b×dmodn. To check your understanding of these rules, you may
wish to verify these relationships for a=29, b=73, c=10, d=32, and n=11.
As you will see in this exercise, some Rosalind problems will ask for a (very
large) integer solution modulo a smaller number to avoid the computational
pitfalls that arise with storing such large numbers.
Given: A protein string of length at most 1000 aa.
Return: The total number of different RNA strings from which the protein could
have been translated, modulo 1,000,000. (Don't neglect the importance of the
stop codon in protein translation.)
Sample Dataset
MA
Sample Output
12
"""
import sys
sys.path.append('../../')

import rosalind_utils


def mrna():
    """Print the number of RNA strings translating to the protein in
    rosalind_mrna.txt, modulo 1,000,000.

    Multiplies the codon count of each residue, then by 3 for the stop
    codon, reducing mod 10**6 at every step to keep numbers small.
    (Python 2: uses the `print` statement.)
    """
    seq = open("rosalind_mrna.txt").read().strip()
    n = 1
    for b in seq:
        # Number of codons in the genetic code that encode amino acid `b`.
        diff_code = len([x for x in rosalind_utils.GENCODE
                         if rosalind_utils.GENCODE[x]==b])
        n = (n*diff_code) % 10**6
    # stop codon
    n = (n*3) % 10**6
    print n
|
[
"sefakilic@gmail.com"
] |
sefakilic@gmail.com
|
da8ecc7359ae6638d5fb2798338f29b32ef9b009
|
34c2d81e0887d0381096717eebc28e4e1a2a2234
|
/setup.py
|
ef33f011bb234d9a151986128bfdd8c2597b838d
|
[] |
no_license
|
digital4rensics/canari
|
06634d9f5154f08b1d5407bd3972d9bd8d52c166
|
ec0c9edb74958a388e8bea279289995dbc9cf5e7
|
refs/heads/master
| 2021-01-18T15:35:52.236835
| 2012-12-25T05:58:29
| 2012-12-25T05:58:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
py
|
#!/usr/bin/env python

from setuptools import setup, find_packages
from os import name

# Console scripts shipped with the package.
scripts = [
    'src/scripts/canari',
    'src/scripts/pysudo',
    'src/scripts/dispatcher',
]

# NOTE(review): `extras` is defined but never passed to setup() -- confirm
# whether it was meant as extras_require.
extras = [
    'readline'
]

# On Windows, ship a .bat launcher alongside each script.
if name == 'nt':
    scripts += ['%s.bat' % s for s in scripts]

setup(
    name='canari',
    author='Nadeem Douba',
    version='0.5',
    author_email='ndouba@gmail.com',
    description='Rapid transform development and transform execution framework for Maltego.',
    license='GPL',
    packages=find_packages('src'),
    package_dir={ '' : 'src' },
    scripts=scripts,
    zip_safe=False,
    package_data={
        '' : [ '*.conf', '*.plate' ]
    },
    install_requires=[
        'pexpect',
        'argparse'
    ],
    dependency_links=[]
)
|
[
"ndouba@gmail.com"
] |
ndouba@gmail.com
|
0c2c82cd75939f224f4cb1936d9f3623cadbb4c0
|
4130bc72387f78ded674db06f04e759658dfbda4
|
/setup.py
|
1a331e0435675d937c4743cd37719b272f0d5192
|
[
"BSD-2-Clause"
] |
permissive
|
soukicz/nginx-amplify-agent
|
231d9f2371627ab836adf3baea37a6e2b9c0716c
|
484f20a902ed07dc4b50107c0ad6c5d7f14e4681
|
refs/heads/master
| 2021-01-12T11:12:22.770574
| 2016-11-02T17:25:22
| 2016-11-02T17:25:22
| 72,869,483
| 0
| 0
| null | 2016-11-04T17:18:48
| 2016-11-04T17:18:47
| null |
UTF-8
|
Python
| false
| false
| 1,464
|
py
|
# -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, '%s/amplify' % os.getcwd())
from setuptools import setup, find_packages
from amplify.agent.common.util.host import is_deb, is_rpm, is_amazon
__author__ = "Mike Belov"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__credits__ = ["Mike Belov", "Andrei Belov", "Ivan Poluyanov", "Oleg Mamontov", "Andrew Alexeev"]
__license__ = ""
__maintainer__ = "Mike Belov"
__email__ = "dedm@nginx.com"
# Files installed outside the package: default config and logrotate policy.
data_files = [
    ('/etc/amplify-agent/', [
        'etc/agent.conf.default',
    ]),
    ('/etc/logrotate.d/', ['etc/logrotate.d/amplify-agent'])
]

# Pick the init script matching the packaging flavour of the host distro.
if is_rpm() or is_amazon():
    data_files.append(
        ('/etc/init.d/', ['etc/chkconfig/amplify-agent'])
    )
elif is_deb():
    data_files.append(
        ('/etc/init.d/', ['etc/init.d/amplify-agent']),
    )

setup(
    name="nginx-amplify-agent",
    version="0.40",
    author="Mike Belov",
    author_email="dedm@nginx.com",
    description="NGINX Amplify Agent",
    keywords="amplify agent nginx",
    # Fixed: was "https:/amplify.nginx.com/" (missing slash in the scheme).
    url="https://amplify.nginx.com/",
    packages=find_packages(exclude=[
        "*.test", "*.test.*", "test.*", "test",
        "tools", "tools.*", "packages", "packages.*"]),
    # Pre-built C extensions bundled with the agent.
    package_data={'amplify': [
        'gevent/*.so',
        'psutil/*.so',
        '*.so',
    ]},
    data_files=data_files,
    scripts=[
        'nginx-amplify-agent.py'
    ],
    entry_points={},
    long_description='NGINX Amplify Agent',
)
|
[
"dedm@nginx.com"
] |
dedm@nginx.com
|
4c6cbdca46382716a0c157a4ee44adfc026d21b9
|
4908b1d34d69c1cb652f25049552562574e1075f
|
/2020/Day-24/Lobby_Layout/example.py
|
b97fbca0cf8b5178a0781e9cb2a4930254914c64
|
[
"MIT"
] |
permissive
|
sreekesari-vangeepuram/adventofcode
|
3d4ad98a25a30640182d928538b421e00ad8259d
|
645531be0208affe042ac0328105b9ef3cfc9dbf
|
refs/heads/main
| 2023-07-26T13:36:03.036721
| 2021-08-11T08:27:25
| 2021-08-11T08:27:25
| 317,850,039
| 1
| 0
|
MIT
| 2021-08-11T08:27:26
| 2020-12-02T12:08:13
|
Go
|
UTF-8
|
Python
| false
| false
| 882
|
py
|
#!/usr/bin/env python

from collections import defaultdict

# Cube coordinates for the six hex directions (x + y + z == 0 throughout):
# nw, se in YZ plane
# ne, sw in XZ plane
# w , e  in XY plane
position = {
    "nw": (0, +1, -1), "ne": (+1, 0, -1),
    "w" : (-1, +1, 0), "e" : (+1, -1, 0),
    "sw": (-1, 0, +1), "se": (0, -1, +1),
}

# `position` source: https://www.redblobgames.com/grids/hexagons/
#      NW  *  NE
#     * \     / *
#  W * - * - * E
#     * /     \ *
#      SW  *  SE

# Every direction token ends in 'e' or 'w', so inserting a space after each
# of those letters splits a run like "nwwswee" into its direction tokens.
fmt_line = lambda line: line \
           .replace("e", "e ") \
           .replace("w", "w ") \
           .split()

ins_list = list(map(fmt_line, open("sample.txt").read().strip().split("\n")))

# (x, y, z) -> 0 for white, 1 for black (all tiles start white).
tiles = defaultdict(int)
for ins in ins_list:
    x = y = z = 0
    # Walk the instruction's directions from the reference tile.
    for dx, dy, dz in [position[_in] for _in in ins]:
        x += dx; y += dy; z += dz
    # Flip the tile reached at the end of the walk.
    tiles[x, y, z] ^= 1

print(f"Number of black sides facing-up: {sum(tiles.values())}")
|
[
"kesari.vangeepuram@gmail.com"
] |
kesari.vangeepuram@gmail.com
|
04479afbaa7d4c54e40051a2426054b6ca0c0aad
|
169d809f45dedcaa3c7b1b49912d8b025abe18d9
|
/date_connvert.py
|
7f68a922ad5d38c34dcb486b4b74a0557a63f1f1
|
[] |
no_license
|
bermec/challenges
|
8a82d1d38d1ed1a0fc3f258443bc0054efc977a6
|
9fb092f20f12b4eaa808e758f00f482a49346c88
|
refs/heads/master
| 2021-10-08T05:05:56.803332
| 2018-12-08T00:20:20
| 2018-12-08T00:20:20
| 109,448,713
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 503
|
py
|
import datetime
from datetime import date
def str2int(strng):
out_lst = []
for x in strng:
if x.isdigit():
x = int(x)
out_lst.append(x)
out_tup = tuple(out_lst)
return out_tup
a = '2015-07-01 2019-04-06'
a = a.split()
print('a ', a)
b = a[0].split('-')
print('b', b)
year = int(b[0])
month = int(b[1])
day = int(b[2])
z = datetime.date(year, month, day)
print(z)
print(type(z))
nice_z = date.strftime(z, '%#B, %#d, %Y')
print('nice_z: ', nice_z)
|
[
"rog@pynguins.com"
] |
rog@pynguins.com
|
43b23642d653c51031076e53d722728e53160630
|
09ce9635b0e74ba178e98efd0d5229a25995713e
|
/submissions/arc034/b.py
|
67f01974b68e9681ad402eff11836072f3cd9ee8
|
[
"Unlicense"
] |
permissive
|
m-star18/atcoder
|
7575f1e1f3ee1dfa4a765493eb17b4ef0ad5f1f0
|
08e475810516602fa088f87daf1eba590b4e07cc
|
refs/heads/main
| 2023-07-14T09:16:42.807150
| 2021-08-22T15:59:48
| 2021-08-22T15:59:48
| 364,458,316
| 1
| 0
|
Unlicense
| 2021-08-22T15:59:49
| 2021-05-05T04:13:03
|
Python
|
UTF-8
|
Python
| false
| false
| 408
|
py
|
import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n = int(readline())
ans = [0]
for check in range(max(0, n - 153), n):
cnt = check
check = str(check)
for i in range(len(check)):
cnt += int(check[i])
if cnt == n:
ans[0] += 1
ans.append(check)
for a in ans:
print(a)
|
[
"31807@toyota.kosen-ac.jp"
] |
31807@toyota.kosen-ac.jp
|
278dd8023b33a8ac37253da6d123844c226c6d0b
|
38372fcc2ca58798176267360ff07f886400bc7b
|
/core_arrears/filters.py
|
f5f875ecce205bf8110cb12b1ecce51054584da2
|
[] |
no_license
|
portman-asset-finance/_GO_PAF
|
4eb22c980aae01e0ad45095eb5e55e4cb4eb5189
|
ee93c49d55bb5717ff1ce73b5d2df6c8daf7678f
|
refs/heads/master
| 2020-09-21T05:22:10.555710
| 2019-11-28T16:44:17
| 2019-11-28T16:44:17
| 224,691,377
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,790
|
py
|
import datetime
from django.contrib.auth.models import User
from .models import arrears_summary_agreement_level, arrears_summary_arrear_level
from core.models import ncf_dd_schedule
import django_filters
class arrears_summary_agreement_level_Filter(django_filters.FilterSet):
arr_agreement_id = django_filters.CharFilter(lookup_expr='icontains')
arr_customercompanyname = django_filters.CharFilter(lookup_expr='icontains')
class Meta:
model = arrears_summary_agreement_level
exclude = ['agreementproducttierid']
class arrears_summary_arrear_level_Filter(django_filters.FilterSet):
def __init__(self, data=None, *args, **kwargs):
# if filterset is bound, use initial values as defaults
if data is not None:
# get a mutable copy of the QueryDict
data = data.copy()
for name, f in self.base_filters.items():
initial = f.extra.get('initial')
# filter param is either missing or empty, use initial as default
if not data.get(name) and initial:
data[name] = initial
super(arrears_summary_arrear_level_Filter, self).__init__(data, *args, **kwargs)
# Get current due date
initial_default_date_queryset = ncf_dd_schedule.objects.filter(dd_status_id='999')[:1].get()
ara_agreement_id = django_filters.CharFilter(lookup_expr='icontains')
ara_customercompanyname = django_filters.CharFilter(lookup_expr='icontains')
ara_due_date = django_filters.DateFilter()
ara_agent_id = django_filters.ModelChoiceFilter(queryset=User.objects.filter(groups__name='NCF_Collections_PrimaryAgents'),label=('Assigned'))
class Meta:
model = arrears_summary_arrear_level
exclude = ['agreementproducttierid']
|
[
"portman-asset-finance@outlook.com"
] |
portman-asset-finance@outlook.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.