blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cee359fe3b6a52b85b80a84045288cefe202b0df
|
a838d4bed14d5df5314000b41f8318c4ebe0974e
|
/sdk/iothub/azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/aio/_iot_dps_client.py
|
5fde1ad85d8fa119ba629237d54b5a8c495d85a4
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
scbedd/azure-sdk-for-python
|
ee7cbd6a8725ddd4a6edfde5f40a2a589808daea
|
cc8bdfceb23e5ae9f78323edc2a4e66e348bb17a
|
refs/heads/master
| 2023-09-01T08:38:56.188954
| 2021-06-17T22:52:28
| 2021-06-17T22:52:28
| 159,568,218
| 2
| 0
|
MIT
| 2019-08-11T21:16:01
| 2018-11-28T21:34:49
|
Python
|
UTF-8
|
Python
| false
| false
| 4,443
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional, TYPE_CHECKING
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
from ._configuration import IotDpsClientConfiguration
from .operations import Operations
from .operations import DpsCertificateOperations
from .operations import IotDpsResourceOperations
from .. import models
class IotDpsClient(object):
    """API for using the Azure IoT Hub Device Provisioning Service features.
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.iothubprovisioningservices.aio.operations.Operations
    :ivar dps_certificate: DpsCertificateOperations operations
    :vartype dps_certificate: azure.mgmt.iothubprovisioningservices.aio.operations.DpsCertificateOperations
    :ivar iot_dps_resource: IotDpsResourceOperations operations
    :vartype iot_dps_resource: azure.mgmt.iothubprovisioningservices.aio.operations.IotDpsResourceOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The subscription identifier.
    :type subscription_id: str
    :param str base_url: Service URL
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: Optional[str] = None,
        **kwargs: Any
    ) -> None:
        # Default to the public Azure Resource Manager endpoint when the
        # caller does not supply an explicit base URL.
        if not base_url:
            base_url = 'https://management.azure.com'
        self._config = IotDpsClientConfiguration(credential, subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Collect every model class from the generated models module so the
        # (de)serializers can resolve type names found in service payloads.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        # Client-side validation is disabled: the service is the authority.
        self._serialize.client_side_validation = False
        self._deserialize = Deserializer(client_models)

        # One operations group per service area, all sharing the pipeline.
        self.operations = Operations(
            self._client, self._config, self._serialize, self._deserialize)
        self.dps_certificate = DpsCertificateOperations(
            self._client, self._config, self._serialize, self._deserialize)
        self.iot_dps_resource = IotDpsResourceOperations(
            self._client, self._config, self._serialize, self._deserialize)

    async def _send_request(self, http_request: HttpRequest, **kwargs: Any) -> AsyncHttpResponse:
        """Runs the network request through the client's chained policies.
        :param http_request: The network request you want to make. Required.
        :type http_request: ~azure.core.pipeline.transport.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to True.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.pipeline.transport.AsyncHttpResponse
        """
        # Substitute the {subscriptionId} placeholder in the request URL.
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        http_request.url = self._client.format_url(http_request.url, **path_format_arguments)
        stream = kwargs.pop("stream", True)
        pipeline_response = await self._client._pipeline.run(http_request, stream=stream, **kwargs)
        return pipeline_response.http_response

    async def close(self) -> None:
        # Release the underlying transport (connections, sessions).
        await self._client.close()

    async def __aenter__(self) -> "IotDpsClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
|
[
"noreply@github.com"
] |
scbedd.noreply@github.com
|
d385cc2f8288b4c5aa47248859593ff5ec03611b
|
67797ff7e63cd4dcafc5bd958f61fc872a5a6449
|
/tests/test_builds.py
|
0e4b5a7ec6a810fee438b7f94bc85f7871071168
|
[
"MIT"
] |
permissive
|
frigg/frigg-worker
|
394c23149c7109a914207de4a06608ec7884fe99
|
8c215cd8f5a27ff9f5a4fedafe93d2ef0fbca86c
|
refs/heads/master
| 2020-04-06T07:05:21.348689
| 2016-05-03T05:13:37
| 2016-05-03T05:13:37
| 25,468,616
| 4
| 3
| null | 2017-10-15T10:14:29
| 2014-10-20T13:51:38
|
Python
|
UTF-8
|
Python
| false
| false
| 6,527
|
py
|
# -*- coding: utf8 -*-
import unittest
from unittest import mock
from docker.helpers import ProcessResult
from docker.manager import Docker
from frigg_settings.model import FriggSettings
from frigg_worker.builds import Build
from frigg_worker.errors import GitCloneError
# Minimal build payload used to construct Build instances throughout the tests.
DATA = {
    'id': 1,
    'branch': 'master',
    'sha': 'superbhash',
    'clone_url': 'https://github.com/frigg/test-repo.git',
    'owner': 'frigg',
    'name': 'test-repo',
}

# Settings fixtures covering the interesting configurations: no services,
# one service, several services, services plus setup tasks, and builds that
# declare after_success/after_failure hooks.
BUILD_SETTINGS_WITH_NO_SERVICES = FriggSettings({
    'setup_tasks': [],
    'tasks': ['tox'],
    'services': [],
    'coverage': {'path': 'coverage.xml', 'parser': 'python'}
})

BUILD_SETTINGS_ONE_SERVICE = FriggSettings({
    'setup_tasks': [],
    'tasks': ['tox'],
    'services': ['redis-server'],
    'coverage': None,
})

BUILD_SETTINGS_FOUR_SERVICES = FriggSettings({
    'setup_tasks': [],
    'tasks': ['tox'],
    'services': ['redis-server', 'postgresql', 'nginx', 'mongodb'],
    'coverage': None,
})

BUILD_SETTINGS_SERVICES_AND_SETUP = FriggSettings({
    'setup_tasks': ['apt-get install nginx'],
    'tasks': ['tox'],
    'services': ['redis-server', 'postgresql', 'nginx', 'mongodb'],
    'coverage': None,
})

BUILD_SETTINGS_WITH_AFTER_TASKS = FriggSettings({
    'tasks': {
        'tests': ['tox'],
        'after_success': ['success_task'],
        'after_failure': ['failure_task'],
    },
})

# Worker configuration handed to every Build under test.
WORKER_OPTIONS = {
    'dispatcher_url': 'http://example.com/dispatch',
    'dispatcher_token': 'tokened',
    'hq_url': 'http://example.com/hq',
    'hq_token': 'tokened',
}

# Reusable clone failure used to simulate a failed git clone.
GIT_ERROR = GitCloneError('UNKNOWN', '', '', True)
class BuildTests(unittest.TestCase):
    """Unit tests for frigg_worker.builds.Build."""

    def setUp(self):
        # A fresh docker manager and Build for every test case.
        self.docker = Docker()
        self.build = Build(1, DATA, self.docker, WORKER_OPTIONS)

    # NOTE: mock.patch decorators inject mocks bottom-up — the first test
    # parameter corresponds to the decorator closest to the function.
    @mock.patch('docker.manager.Docker.start')
    @mock.patch('docker.manager.Docker.stop')
    @mock.patch('frigg_worker.builds.parse_coverage')
    @mock.patch('frigg_worker.builds.Build.clone_repo')
    @mock.patch('frigg_worker.builds.Build.run_task')
    @mock.patch('docker.manager.Docker.read_file')
    @mock.patch('frigg_worker.builds.Build.report_run', lambda *x: None)
    @mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
    def test_run_tests(self, mock_read_file, mock_run_task, mock_clone_repo,
                       mock_parse_coverage, mock_docker_stop, mock_docker_start):
        # Happy path: clone, run the configured task, read + parse coverage.
        self.build.run_tests()
        mock_run_task.assert_called_once_with('tox')
        self.assertTrue(mock_clone_repo.called)
        mock_read_file.assert_called_once_with('~/builds/1/coverage.xml')
        self.assertTrue(mock_parse_coverage.called)
        self.assertTrue(self.build.succeeded)
        self.assertTrue(self.build.finished)

    @mock.patch('frigg_worker.builds.Build.clone_repo')
    @mock.patch('frigg_worker.builds.Build.run_task', side_effect=OSError())
    @mock.patch('frigg_worker.builds.Build.report_run', lambda *x: None)
    @mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_WITH_NO_SERVICES)
    def test_run_tests_fail_task(self, mock_run_task, mock_clone_repo):
        # A task that raises must mark the build failed but still finished.
        self.build.run_tests()
        self.assertTrue(mock_clone_repo.called)
        mock_run_task.assert_called_once_with('tox')
        self.assertFalse(self.build.succeeded)
        self.assertTrue(self.build.finished)

    @mock.patch('frigg_worker.builds.Build.run_task')
    @mock.patch('frigg_worker.builds.Build.clone_repo', side_effect=GIT_ERROR)
    def test_run_tests_fail_clone(self, mock_clone, mock_run_task):
        # If the clone fails, no tasks run and the build is not successful.
        self.build.run_tests()
        self.assertFalse(mock_run_task.called)
        self.assertFalse(self.build.succeeded)

    @mock.patch('frigg_worker.api.APIWrapper.report_run')
    @mock.patch('frigg_worker.builds.Build.serializer', lambda *x: {})
    @mock.patch('frigg_worker.jobs.build_settings', lambda *x: {})
    def test_report_run(self, mock_report_run):
        # report_run forwards the serialized build to the HQ API wrapper.
        self.build.report_run()
        mock_report_run.assert_called_once_with('Build', 1, '{}')

    @mock.patch('docker.manager.Docker.directory_exist')
    @mock.patch('docker.manager.Docker.run')
    def test_delete_working_dir(self, mock_local_run, mock_directory_exist):
        # The working directory is removed via an rm -rf inside the container.
        self.build.delete_working_dir()
        self.assertTrue(mock_directory_exist.called)
        mock_local_run.assert_called_once_with('rm -rf ~/builds/1')

    @mock.patch('docker.manager.Docker.run')
    @mock.patch('frigg_worker.builds.Build.delete_working_dir', lambda x: True)
    @mock.patch('frigg_worker.builds.Build.clone_repo', lambda x: True)
    @mock.patch('frigg_worker.builds.Build.parse_coverage', lambda x: True)
    @mock.patch('frigg_worker.builds.Build.report_run', lambda x: None)
    @mock.patch('frigg_worker.jobs.build_settings', lambda *x: BUILD_SETTINGS_SERVICES_AND_SETUP)
    def test_build_setup_steps(self, mock_docker_run):
        # Services are started first, then setup tasks, then the test tasks —
        # the call order below pins that sequence.
        self.build.run_tests()
        mock_docker_run.assert_has_calls([
            mock.call('sudo service redis-server start'),
            mock.call('sudo service postgresql start'),
            mock.call('sudo service nginx start'),
            mock.call('sudo service mongodb start'),
            mock.call('apt-get install nginx', self.build.working_directory),
            mock.call('tox', self.build.working_directory),
        ])
def test_run_build_should_call_after_success_on_successful_build(mocker):
    """A successful build must run the configured after_success task."""
    for method in ('clone_repo', 'run_task', 'report_run'):
        mocker.patch('frigg_worker.builds.Build.' + method)
    mocker.patch(
        'frigg_worker.jobs.build_settings',
        return_value=BUILD_SETTINGS_WITH_AFTER_TASKS,
    )
    run_after_mock = mocker.patch('frigg_worker.builds.Build.run_after_task')

    Build(1, DATA, Docker(), WORKER_OPTIONS).run_tests()

    run_after_mock.assert_called_once_with('success_task')
def test_run_build_should_call_after_failure_on_failed_build(mocker):
    """A failed build must run the configured after_failure task."""
    failed_result = ProcessResult('tox')
    failed_result.return_code = 1
    for method in ('clone_repo', 'run_task', 'report_run'):
        mocker.patch('frigg_worker.builds.Build.' + method)
    mocker.patch('frigg_worker.builds.Build.succeeded', False)
    mocker.patch(
        'frigg_worker.jobs.build_settings',
        return_value=BUILD_SETTINGS_WITH_AFTER_TASKS,
    )
    run_after_mock = mocker.patch('frigg_worker.builds.Build.run_after_task')

    Build(1, DATA, Docker(), WORKER_OPTIONS).run_tests()

    run_after_mock.assert_called_once_with('failure_task')
|
[
"me@rolflekang.com"
] |
me@rolflekang.com
|
87f2f3fc21a0db478f1cd45aa29417581ea8d007
|
a0c53168a4bdcfb0aa917d6d2c602f0999443a10
|
/DPSPipeline/widgets/projectviewwidget/shotTreeWidgetItem.py
|
8e1ce528e98d88da4f527aa93dfc0ecae9dfdf61
|
[] |
no_license
|
kanooshka/DPS_PIPELINE
|
8067154c59ca5c8c9c09740969bb6e8537021903
|
df2fcdecda5bce98e4235ffddde1e99f334562cc
|
refs/heads/master
| 2021-05-24T04:32:03.457648
| 2018-09-07T13:25:11
| 2018-09-07T13:25:11
| 29,938,064
| 3
| 2
| null | 2020-07-23T23:06:37
| 2015-01-27T22:26:01
|
Python
|
UTF-8
|
Python
| false
| false
| 3,598
|
py
|
from PyQt4 import QtCore,QtGui
from DPSPipeline.widgets import taskProgressButton
from DPSPipeline.widgets import userLabel
import sharedDB
import operator
class ShotTreeWidgetItem(QtGui.QTreeWidgetItem):
    """Tree-widget row representing one shot of a project.

    Builds one column per per-shot phase; each cell pairs a task progress
    button with a user label showing who the task is assigned to.
    """

    def __init__(self, shotWidget='', shotPhaseNames=None, shot="", project=None, phases=None):
        # BUGFIX: the original called super(QtGui.QTreeWidgetItem, self),
        # which skips QTreeWidgetItem's own __init__ entirely; name this
        # class so the full chain runs.
        super(ShotTreeWidgetItem, self).__init__()
        # Mutable default arguments ([]) replaced with None sentinels to
        # avoid state shared across instances; the call signature is
        # backward-compatible.
        self.shotWidget = shotWidget
        self.shotPhaseNames = shotPhaseNames if shotPhaseNames is not None else []
        self.shot = shot
        self.phases = phases if phases is not None else []
        self.project = project if project is not None else []
        self.btns = []
        if shotWidget is not None:
            # NOTE(review): the default for shotWidget is '' (not None), so
            # this branch also runs for the default — confirm callers always
            # pass a real tree widget.
            self.shotWidget.addTopLevelItem(self)
        # Column 0: shot id; column 1: shot number.
        self.setText(0, (str(self.shot._idshots)))
        self.setText(1, (str(self.shot._number)))
        columnIndex = 2
        self.setToolTip(1, ("ShotID: " + str(self.shot._idshots)))
        for phase in self.phases:
            if phase._taskPerShot:
                # Find this shot's task for the phase, if one exists.
                currentTask = None
                if self.shot._tasks is not None:
                    for task in self.shot._tasks.values():
                        if task._idphases == phase._idphases:
                            currentTask = task
                # Build the progress-button + user-label cell for the phase.
                btn = taskProgressButton.TaskProgressButton(
                    _task=currentTask, _shot=self.shot, _forPhase=phase._idphases)
                uLabel = userLabel.UserLabel(task=currentTask)
                btn.stateChanged.connect(uLabel.getUserFromTask)
                taskBtnWidget = QtGui.QWidget()
                vLayout = QtGui.QHBoxLayout()
                taskBtnWidget.setLayout(vLayout)
                taskBtnWidget._btn = btn
                taskBtnWidget._uLabel = uLabel
                vLayout.addWidget(btn)
                vLayout.addWidget(uLabel)
                self.shotWidget.setItemWidget(self, columnIndex, taskBtnWidget)
                self.btns.append(btn)
                columnIndex += 1

    def deselect(self):
        """Restore default row colouring via the parent widget."""
        self.shotWidget.UpdateBackgroundColors()

    def select(self):
        """Highlight every column of this row in yellow."""
        try:
            bgc = QtGui.QColor(250, 250, 0)
            for col in range(0, self.shotWidget.columnCount()):
                self.setBackground(col, bgc)
        except Exception:
            # Narrowed from a bare except; the row may already have been
            # removed from the widget when this fires.
            print("Unable to change color on shot item, sequence was removed from list")
|
[
"kanooshka@gmail.com"
] |
kanooshka@gmail.com
|
a7f58b1d085e989f8ebfde64f6717ef978130d69
|
9b6f36f544af5a2c1c042b18dda920c78fd11331
|
/omsBackend/apps/process/views.py
|
44b2afb8c52ee8a722fefa1cda93256917efa29b
|
[] |
no_license
|
Nikita-stels/MyOms
|
a946f08b4ba7abfa8392e98c579320b501a7ca2a
|
fdaf9d5a2a29b5386c1a86fcf89a2c0d5527687a
|
refs/heads/master
| 2022-09-17T20:40:45.228067
| 2020-01-08T14:41:04
| 2020-01-08T14:41:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,817
|
py
|
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework import viewsets
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.filters import SearchFilter
from apps.process.filters import ProcessFilterBackend, IPPortBindingFilterBackend
from apps.process.serializers import ProcessSerlizer, IPPortSerializer
from apps.process.models import Process, IPPortBinding
# Create your views here.
class ProcessViewSet(viewsets.ModelViewSet):
    """CRUD API for Process records.

    Supports exact-match filtering (django-filter), free-text search
    (SearchFilter) and the project-specific ProcessFilterBackend.
    """
    filter_backends = (SearchFilter, DjangoFilterBackend, ProcessFilterBackend)
    serializer_class = ProcessSerlizer
    queryset = Process.objects.all()
    # The same field set drives both exact filtering and text search;
    # declare it once instead of maintaining two identical 15-item copies.
    filter_fields = ['process_name', 'process_version', 'process_default_boot', 'process_type',
                     'process_quantity', 'process_enable_monitoring', 'process_charge',
                     'process_port', 'process_binding_IP', 'process_port_type',
                     'process_agreement', 'process_affiliated_process', 'process_is_enabled',
                     'process_server', 'process_vmserver']
    search_fields = filter_fields
class IPPortBindingViewSets(viewsets.ModelViewSet):
    """CRUD API for IP/port binding records."""
    # Spacing normalized to match ProcessViewSet's backend tuple style.
    filter_backends = (SearchFilter, DjangoFilterBackend, IPPortBindingFilterBackend)
    queryset = IPPortBinding.objects.all()
    serializer_class = IPPortSerializer
    # Identical field sets for filtering and search — declare once.
    filter_fields = ['ip_ip', 'ip_port', 'ip_process', 'ip_server', 'ip_vmserver']
    search_fields = filter_fields
|
[
"1069195546@qq.com"
] |
1069195546@qq.com
|
1ae050f68b58556641dae5383d76864138ae3f4d
|
cd4bbecc3f713b0c25508d0c5674d9e103db5df4
|
/toontown/coghq/DistributedCogKartAI.py
|
3e086524d2d0f41d184b2f3a51523165283ae197
|
[] |
no_license
|
peppythegod/ToontownOnline
|
dce0351cfa1ad8c476e035aa3947fdf53de916a6
|
2e5a106f3027714d301f284721382cb956cd87a0
|
refs/heads/master
| 2020-04-20T05:05:22.934339
| 2020-01-02T18:05:28
| 2020-01-02T18:05:28
| 168,646,608
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,621
|
py
|
from direct.directnotify import DirectNotifyGlobal
from toontown.safezone import DistributedGolfKartAI
from toontown.building import DistributedElevatorExtAI
from toontown.building import ElevatorConstants
from toontown.toonbase import ToontownGlobals
class DistributedCogKartAI(DistributedElevatorExtAI.DistributedElevatorExtAI):
    """AI-side cog kart: an elevator that carries toons into a country club.

    The kart's course index selects which Bossbot country club interior the
    passengers are sent to.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory(
        'DistributedCogKartAI')

    def __init__(self, air, index, x, y, z, h, p, r, bldg, minLaff):
        # Cache the spawn transform so clients can query it via getPosHpr().
        self.posHpr = (x, y, z, h, p, r)
        DistributedElevatorExtAI.DistributedElevatorExtAI.__init__(
            self, air, bldg, minLaff=minLaff)
        self.type = ElevatorConstants.ELEVATOR_COUNTRY_CLUB
        self.courseIndex = index
        # Map course index -> country club interior id.
        if self.courseIndex == 0:
            self.countryClubId = ToontownGlobals.BossbotCountryClubIntA
        elif self.courseIndex == 1:
            self.countryClubId = ToontownGlobals.BossbotCountryClubIntB
        elif self.courseIndex == 2:
            self.countryClubId = ToontownGlobals.BossbotCountryClubIntC
        else:
            # Fallback for unknown indices — presumably the first country
            # club id; TODO confirm against ToontownGlobals.
            self.countryClubId = 12500

    def getPosHpr(self):
        # Position/orientation tuple captured at construction time.
        return self.posHpr

    def elevatorClosed(self):
        """Create the country club and send every seated avatar into it."""
        numPlayers = self.countFullSeats()
        if numPlayers > 0:
            # Collect avatar ids of occupied seats (None/0 mean empty).
            players = []
            for i in self.seats:
                if i not in [None, 0]:
                    players.append(i)
                    continue
            countryClubZone = self.bldg.createCountryClub(
                self.countryClubId, players)
            for seatIndex in range(len(self.seats)):
                avId = self.seats[seatIndex]
                if avId:
                    # Tell the client which interior zone to enter, then
                    # free the seat.
                    self.sendUpdateToAvatarId(
                        avId, 'setCountryClubInteriorZone', [countryClubZone])
                    self.clearFullNow(seatIndex)
                    continue
        else:
            self.notify.warning('The elevator left, but was empty.')
        self.fsm.request('closed')

    def sendAvatarsToDestination(self, avIdList):
        """Force the given avatars into a newly created country club."""
        if len(avIdList) > 0:
            countryClubZone = self.bldg.createCountryClub(
                self.countryClubId, avIdList)
            for avId in avIdList:
                if avId:
                    self.sendUpdateToAvatarId(
                        avId, 'setCountryClubInteriorZoneForce',
                        [countryClubZone])
                    continue

    def getCountryClubId(self):
        return self.countryClubId

    def enterClosed(self):
        DistributedElevatorExtAI.DistributedElevatorExtAI.enterClosed(self)
        # Immediately reopen for the next group of toons.
        self.fsm.request('opening')
|
[
"47166977+peppythegod@users.noreply.github.com"
] |
47166977+peppythegod@users.noreply.github.com
|
c511b2f3500d3da95d7f4d3f4d79227c85982803
|
287f810559d6669ab566abb82d52f7673ddc5248
|
/virtual/bin/sqlformat
|
361c2d24afeb3d2456296ff97d9f4d4eef9cd901
|
[
"MIT"
] |
permissive
|
Mariga123/studious-octo-giggle
|
dc70b52605a5b4dacd8b529b6d2a402fc880076f
|
3acc7d5e6a4814fae7744a5fb8524f810d891a65
|
refs/heads/master
| 2023-02-21T08:31:26.862617
| 2021-01-20T08:24:08
| 2021-01-20T08:24:08
| 330,034,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 252
|
#!/home/moringa/Documents/instagram/virtual/bin/python
# -*- coding: utf-8 -*-
# Console-script shim generated for the sqlparse package: it simply
# delegates to sqlparse's command-line entry point.
import re
import sys
from sqlparse.__main__ import main
if __name__ == '__main__':
    # Strip the platform-specific launcher suffix (-script.pyw on Windows,
    # .exe for frozen launchers) so usage/help output shows "sqlformat".
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
|
[
"johnmariga8@gmail.com"
] |
johnmariga8@gmail.com
|
|
410beadaabe051765f1ac70355a622299ed919a8
|
fd7598754b87536d3072edee8e969da2f838fa03
|
/chapter5_programming11.py
|
ffba02a5ad2e212b0444d81702c221abbb78ca22
|
[] |
no_license
|
dorabelme/Python-Programming-An-Introduction-to-Computer-Science
|
7de035aef216b2437bfa43b7d49b35018e7a2153
|
3c60c9ecfdd69cc9f47b43f4a8e6a13767960301
|
refs/heads/master
| 2020-05-02T23:19:44.573072
| 2019-03-28T21:27:20
| 2019-03-28T21:27:20
| 178,261,171
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 507
|
py
|
# File: chaos_modified.py
# A simple program illustrating chaotic behavior.
def main():
    """Print side-by-side chaotic sequences for two user-supplied seeds."""
    print("This program illustrates chaotic function")
    # SECURITY FIX: the original used eval(input(...)), which executes
    # arbitrary expressions typed by the user; float() parses exactly the
    # number we need and raises ValueError on bad input.
    num1 = float(input("Enter a number between 0 and 1: "))
    num2 = float(input("Enter a number between 0 and 1: "))
    # Display table header
    print('\n{0} {1:^8} {2:^8}'.format('index', num1, num2))
    print('-' * 27)
    for i in range(1, 11):
        # Logistic map x -> 3.9 * x * (1 - x): nearby seeds diverge quickly.
        num1 = 3.9 * num1 * (1 - num1)
        num2 = 3.9 * num2 * (1 - num2)
        print('\n{0:^5} {1:8.6f} {2:8.6f}'.format(i, num1, num2))
main()
|
[
"contact.dorabelme@gmail.com"
] |
contact.dorabelme@gmail.com
|
7f1d01efbf844df24be9e5b7c02597594675641d
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/html_parsing/get_game_genres/genre_translate_file/load.py
|
1eae3f325e8e436432fd7a4efb27dd080bb8d960
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,076
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
import json
from pathlib import Path
# Absolute path to the bundled genre translation table.
FILE_NAME_GENRE_TRANSLATE = str(Path(__file__).parent.resolve() / 'data' / 'genre_translate.json')


def load(file_name: str = FILE_NAME_GENRE_TRANSLATE) -> dict:
    """Load the genre-translation mapping from *file_name*.

    Returns an empty dict when the file is missing or contains invalid
    JSON — loading is best-effort by design.
    """
    try:
        # "with" guarantees the handle is closed; the original leaked it.
        with open(file_name, encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError):
        # OSError: missing/unreadable file; ValueError covers
        # json.JSONDecodeError. Narrowed from a bare "except:", which also
        # swallowed KeyboardInterrupt/SystemExit.
        return dict()
if __name__ == '__main__':
    genre_translate = load()
    print(f'Genre_translate ({len(genre_translate)}): {genre_translate}')
    print()

    # Print all undefined genres without '{' / '}' and indent
    genre_null_translate = dict(
        (genre, translation)
        for genre, translation in genre_translate.items()
        if translation is None
    )
    print(f'Genre null translate ({len(genre_null_translate)}):')

    dumped = json.dumps(genre_null_translate, ensure_ascii=False, indent=4)
    body_lines = dumped.splitlines()[1:-1]
    for line_number, raw_line in enumerate(body_lines):
        print(raw_line.strip())
        # Break the listing into visual chunks of 40 lines.
        if line_number > 0 and line_number % 40 == 0:
            print()
|
[
"ilya.petrash@inbox.ru"
] |
ilya.petrash@inbox.ru
|
33f8f508eb2e8185c685dd2b5b388d1f9e079a0a
|
53818da6c5a172fe8241465dcbbd34fba382820d
|
/PythonProgram/chapter_05/5-5.py
|
ab0c75778706842c11cba10bdc52e8e4bede383c
|
[] |
no_license
|
Lethons/PythonExercises
|
f4fec3bcbfea4c1d8bc29dfed5b770b6241ad93b
|
81d588ffecf543ec9de8c1209c7b26c3d6a423b3
|
refs/heads/master
| 2021-04-15T11:36:08.991028
| 2018-07-07T09:20:40
| 2018-07-07T09:20:40
| 126,686,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 180
|
py
|
# Exercise: award points by alien colour (green 5, yellow 10, anything else 15).
alien_color = 'green'
if alien_color not in ('green', 'yellow'):
    print("You get 15 points.")
elif alien_color == 'green':
    print("You get 5 points.")
else:
    print("You get 10 points.")
|
[
"lethons@163.com"
] |
lethons@163.com
|
8f26b73981652e2db2c64f29c1ac6de561ee78d0
|
3b786d3854e830a4b46ee55851ca186becbfa650
|
/SystemTesting/pylib/nsx/vsm/virtual_wire/schema/virtual_wire_schema.py
|
454889206cae5de5297c0f26a74ec9af814fad7c
|
[] |
no_license
|
Cloudxtreme/MyProject
|
d81f8d38684333c22084b88141b712c78b140777
|
5b55817c050b637e2747084290f6206d2e622938
|
refs/heads/master
| 2021-05-31T10:26:42.951835
| 2015-12-10T09:57:04
| 2015-12-10T09:57:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,209
|
py
|
import base_schema
from type_schema import TypeSchema
from vds_context_with_backing_schema import VdsContextWithBackingSchema
class VirtualWireSchema(base_schema.BaseSchema):
    """Schema object for a virtualWire payload.

    This schema is not used for configuration; its fields are filled in
    from the XML body of GET responses.
    """
    _schema_name = "virtualWire"

    def __init__(self, py_dict=None):
        """ Constructor to create VirtualWireSchema object
        @param py_dict : python dictionary to construct this object
        """
        # NOTE(review): py_dict is accepted for interface parity with other
        # schemas but is not consumed here — confirm whether the base class
        # is expected to process it.
        super(VirtualWireSchema, self).__init__()
        self.set_data_type('xml')
        self.objectId = None
        self.objectTypeName = None
        # FIX: vsmUuid was assigned twice in the original (here and again
        # near the end); the duplicate has been removed.
        self.vsmUuid = None
        self.revision = None
        self.type = TypeSchema()
        self.name = None
        self.description = None
        self.extendedAttributes = None
        self.clientHandle = None
        self.tenantId = None
        self.vdnScopeId = None
        self.vdsContextWithBacking = VdsContextWithBackingSchema()
        self.vdnId = None
        self.multicastAddr = None
        self.controlPlaneMode = None
        self.isUniversal = None
        self.universalRevision = None
        self.ctrlLsUuid = None
|
[
"bpei@vmware.com"
] |
bpei@vmware.com
|
157173d15f010222521b1518dd61e2ed944ee316
|
0d7ad3520c8e039cc47dff664c7e661a6df933b4
|
/blog/migrations/0001_initial.py
|
7cc537495cd902d1123df02690d7c224ca3ec31d
|
[] |
no_license
|
skyjan0428/MyBlog
|
c61f710802bc3b599b9449ccb60ae00ed171e4ee
|
a9a6e209087b557530600d03168cf77e6de53feb
|
refs/heads/master
| 2022-12-10T01:47:06.397172
| 2021-01-06T18:29:41
| 2021-01-06T18:29:41
| 197,189,345
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,965
|
py
|
# Generated by Django 2.2.3 on 2019-07-26 10:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the blog app: users, auth tokens, relationships,
    posts, photos, direct messages, likes and websocket clients.

    Generated by Django's makemigrations — the field definitions below must
    not be edited by hand once applied.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Core account record; password stored as a CharField — hashing is
        # presumably handled by the application layer (not visible here).
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=10)),
                ('email', models.CharField(max_length=30)),
                ('password', models.CharField(max_length=100)),
                ('description', models.TextField(null=True)),
            ],
        ),
        # Opaque auth token, one-to-many from User.
        migrations.CreateModel(
            name='Token',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
                ('token', models.CharField(max_length=255, null=True, unique=True)),
                ('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='token_user', to='blog.User')),
            ],
        ),
        # Directed user-to-user edge; `value` encodes the relationship kind.
        migrations.CreateModel(
            name='Relationship',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
                ('value', models.IntegerField()),
                ('user1', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='self', to='blog.User')),
                ('user2', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='other', to='blog.User')),
            ],
        ),
        # Post with an optional self-reference (`attach`) to another post.
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
                ('content', models.TextField()),
                ('attach', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, related_name='attach_to_post', to='blog.Post')),
                ('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='post_user_id', to='blog.User')),
            ],
        ),
        # Uploaded image, optionally attached to a post; is_sticker flags
        # reusable sticker images.
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('photo', models.ImageField(upload_to='photo/')),
                ('is_sticker', models.BooleanField(default=False)),
                ('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
                ('post', models.ForeignKey(default='', null=True, on_delete=django.db.models.deletion.PROTECT, related_name='photo_post_id', to='blog.Post')),
                ('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='photo_user_id', to='blog.User')),
            ],
        ),
        # Direct message between two users.
        migrations.CreateModel(
            name='Message',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
                ('text', models.TextField(null=True)),
                ('reciever', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='reciever', to='blog.User')),
                ('sender', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='sender', to='blog.User')),
            ],
        ),
        # Like edge between a user and a post.
        migrations.CreateModel(
            name='LikePost',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('post', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='like_post_id', to='blog.Post')),
                ('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='like_user_id', to='blog.User')),
            ],
        ),
        # Websocket/channel registration for a connected user.
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(auto_now=True, verbose_name='Datetime')),
                ('channel_name', models.CharField(max_length=100)),
                ('user_id', models.ForeignKey(default='', on_delete=django.db.models.deletion.PROTECT, related_name='client_user_id', to='blog.User')),
            ],
        ),
    ]
|
[
"skyjan0428@gmail.com"
] |
skyjan0428@gmail.com
|
d061fd184220c552f6982d0420258b026e555e2e
|
da053e9a63434f7b7a53faef07f6d7d2800214e4
|
/zerver/tests/test_webhooks_common.py
|
8af83cb0057edd8def96cadface3c285c442a539
|
[
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] |
permissive
|
BackGroundC/zulip
|
3f8ecd4ca98f05c4bcb9be0034140404d7f187f1
|
2bd6d275a70a7683986edc72fa8585726e976604
|
refs/heads/master
| 2020-05-30T21:19:36.799304
| 2019-06-02T22:00:16
| 2019-06-02T22:00:16
| 189,969,512
| 4
| 0
|
Apache-2.0
| 2019-06-03T08:53:52
| 2019-06-03T08:53:51
| null |
UTF-8
|
Python
| false
| false
| 5,759
|
py
|
# -*- coding: utf-8 -*-
from django.http import HttpRequest
from zerver.decorator import api_key_only_webhook_view
from zerver.lib.exceptions import InvalidJSONError, JsonableError
from zerver.lib.test_classes import ZulipTestCase, WebhookTestCase
from zerver.lib.webhooks.common import \
validate_extract_webhook_http_header, \
MISSING_EVENT_HEADER_MESSAGE, MissingHTTPEventHeader, \
INVALID_JSON_MESSAGE
from zerver.models import get_user, get_realm, UserProfile
from zerver.lib.users import get_api_key
from zerver.lib.send_email import FromAddress
from zerver.lib.test_helpers import HostRequestMock
class WebhooksCommonTestCase(ZulipTestCase):
    def test_webhook_http_header_header_exists(self) -> None:
        """validate_extract_webhook_http_header returns the header's value
        when the header is present on the request."""
        webhook_bot = get_user('webhook-bot@zulip.com', get_realm('zulip'))
        request = HostRequestMock()
        # Django exposes HTTP headers via request.META with an HTTP_ prefix.
        request.META['HTTP_X_CUSTOM_HEADER'] = 'custom_value'
        request.user = webhook_bot
        header_value = validate_extract_webhook_http_header(request, 'X_CUSTOM_HEADER',
                                                            'test_webhook')
        self.assertEqual(header_value, 'custom_value')
def test_webhook_http_header_header_does_not_exist(self) -> None:
webhook_bot = get_user('webhook-bot@zulip.com', get_realm('zulip'))
webhook_bot.last_reminder = None
notification_bot = self.notification_bot()
request = HostRequestMock()
request.user = webhook_bot
request.path = 'some/random/path'
exception_msg = "Missing the HTTP event header 'X_CUSTOM_HEADER'"
with self.assertRaisesRegex(MissingHTTPEventHeader, exception_msg):
validate_extract_webhook_http_header(request, 'X_CUSTOM_HEADER',
'test_webhook')
msg = self.get_last_message()
expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
bot_name=webhook_bot.full_name,
request_path=request.path,
header_name='X_CUSTOM_HEADER',
integration_name='test_webhook',
support_email=FromAddress.SUPPORT
).rstrip()
self.assertEqual(msg.sender.email, notification_bot.email)
self.assertEqual(msg.content, expected_message)
def test_notify_bot_owner_on_invalid_json(self) -> None:
@api_key_only_webhook_view('ClientName', notify_bot_owner_on_invalid_json=False)
def my_webhook_no_notify(request: HttpRequest, user_profile: UserProfile) -> None:
raise InvalidJSONError("Malformed JSON")
@api_key_only_webhook_view('ClientName', notify_bot_owner_on_invalid_json=True)
def my_webhook_notify(request: HttpRequest, user_profile: UserProfile) -> None:
raise InvalidJSONError("Malformed JSON")
webhook_bot_email = 'webhook-bot@zulip.com'
webhook_bot_realm = get_realm('zulip')
webhook_bot = get_user(webhook_bot_email, webhook_bot_realm)
webhook_bot_api_key = get_api_key(webhook_bot)
request = HostRequestMock()
request.POST['api_key'] = webhook_bot_api_key
request.host = "zulip.testserver"
expected_msg = INVALID_JSON_MESSAGE.format(webhook_name='ClientName')
last_message_id = self.get_last_message().id
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_no_notify(request) # type: ignore # mypy doesn't seem to apply the decorator
# First verify that without the setting, it doesn't send a PM to bot owner.
msg = self.get_last_message()
self.assertEqual(msg.id, last_message_id)
self.assertNotEqual(msg.content, expected_msg.strip())
# Then verify that with the setting, it does send such a message.
with self.assertRaisesRegex(JsonableError, "Malformed JSON"):
my_webhook_notify(request) # type: ignore # mypy doesn't seem to apply the decorator
msg = self.get_last_message()
self.assertNotEqual(msg.id, last_message_id)
self.assertEqual(msg.sender.email, self.notification_bot().email)
self.assertEqual(msg.content, expected_msg.strip())
class MissingEventHeaderTestCase(WebhookTestCase):
    """End-to-end check that a webhook request missing its event header fails
    cleanly and notifies the bot owner, using the real Groove integration."""
    STREAM_NAME = 'groove'
    URL_TEMPLATE = '/api/v1/external/groove?stream={stream}&api_key={api_key}'
    # This tests the validate_extract_webhook_http_header function with
    # an actual webhook, instead of just making a mock
    def test_missing_event_header(self) -> None:
        self.subscribe(self.test_user, self.STREAM_NAME)
        result = self.client_post(self.url, self.get_body('ticket_state_changed'),
                                  content_type="application/x-www-form-urlencoded")
        self.assert_json_error(result, "Missing the HTTP event header 'X_GROOVE_EVENT'")
        webhook_bot = get_user('webhook-bot@zulip.com', get_realm('zulip'))
        webhook_bot.last_reminder = None
        notification_bot = self.notification_bot()
        msg = self.get_last_message()
        expected_message = MISSING_EVENT_HEADER_MESSAGE.format(
            bot_name=webhook_bot.full_name,
            request_path='/api/v1/external/groove',
            header_name='X_GROOVE_EVENT',
            integration_name='Groove',
            support_email=FromAddress.SUPPORT
        ).rstrip()
        if msg.sender.email != notification_bot.email:  # nocoverage
            # This block seems to fire occasionally; debug output:
            print(msg)
            print(msg.content)
        self.assertEqual(msg.sender.email, notification_bot.email)
        self.assertEqual(msg.content, expected_message)
    def get_body(self, fixture_name: str) -> str:
        """Load the named Groove JSON fixture as the request body."""
        return self.webhook_fixture_data("groove", fixture_name, file_type="json")
|
[
"tabbott@zulipchat.com"
] |
tabbott@zulipchat.com
|
13a149b3ba1a58fb0f39d1ad41efb1703271525b
|
a1a43879a2da109d9fe8d9a75f4fda73f0d7166b
|
/api/tests/cross_entropy.py
|
ba5e4f8834015d4ed91cf6316b10f03edd472423
|
[] |
no_license
|
PaddlePaddle/benchmark
|
a3ed62841598d079529c7440367385fc883835aa
|
f0e0a303e9af29abb2e86e8918c102b152a37883
|
refs/heads/master
| 2023-09-01T13:11:09.892877
| 2023-08-21T09:32:49
| 2023-08-21T09:32:49
| 173,032,424
| 78
| 352
| null | 2023-09-14T05:13:08
| 2019-02-28T03:14:16
|
Python
|
UTF-8
|
Python
| false
| false
| 5,138
|
py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from common_import import *
@benchmark_registry.register("cross_entropy")
class CrossEntropyConfig(APIConfig):
    """Benchmark configuration for the cross_entropy op across frameworks."""
    def __init__(self):
        super(CrossEntropyConfig, self).__init__("cross_entropy")
    def init_from_json(self, filename, config_id=0, unknown_dim=16):
        """Load the op config and normalize shapes/axis for each framework.

        Disables the TF/PyTorch variants for configurations those frameworks
        cannot express (float labels, non-last axis, soft labels).
        """
        super(CrossEntropyConfig, self).init_from_json(filename, config_id,
                                                       unknown_dim)
        input_rank = len(self.input_shape)
        # Normalize "last axis" to -1 so all later checks are uniform.
        if not hasattr(self, "axis") or self.axis == input_rank - 1:
            self.axis = -1
        self.num_classes = self.input_shape[self.axis]
        self.feed_spec = [
            {
                "range": [0, 1]
            },  # input
            {
                "range": [0, self.num_classes]
            }  # label
        ]
        # The TF path below requires integer labels over the last axis.
        if self.label_dtype in ['float32', 'float64'] or self.axis != -1:
            self.run_tf = False
        if self.soft_label or self.axis != -1:
            print(
                "Warning:\n"
                "  1. PyTorch does not have soft_label param, it only support hard label.\n"
            )
            self.run_torch = False
        else:
            # PyTorch expects 2-D logits; flatten leading dims into one batch dim.
            if input_rank != 2:
                self.input_shape = [
                    np.prod(self.input_shape[0:input_rank - 1]),
                    self.input_shape[-1]
                ]
            label_rank = len(self.label_shape)
            if label_rank != 2:
                self.label_shape = [
                    np.prod(self.label_shape[0:label_rank - 1]), 1
                ]
    def to_pytorch(self):
        """Return a PyTorch config; (N, 1) labels are squeezed to 1-D (N,)."""
        torch_config = super(CrossEntropyConfig, self).to_pytorch()
        if self.label_shape[-1] == 1:
            label_rank = len(self.label_shape)
            torch_config.label_shape = [
                np.prod(self.label_shape[0:label_rank - 1])
            ]
        return torch_config
    def to_tensorflow(self):
        """Return a TF config; a trailing label dim of 1 is dropped for one_hot."""
        tf_config = super(CrossEntropyConfig, self).to_tensorflow()
        label_rank = len(tf_config.label_shape)
        if tf_config.label_shape[label_rank - 1] == 1:
            tf_config.label_shape = tf_config.label_shape[0:label_rank - 1]
        return tf_config
@benchmark_registry.register("cross_entropy")
class PaddleCrossEntropy(PaddleOpBenchmarkBase):
    """Paddle graph builder for the cross_entropy benchmark."""
    def build_graph(self, config):
        """Build forward (and optionally backward) graph for paddle cross_entropy."""
        input = self.variable(
            name="input", shape=config.input_shape, dtype=config.input_dtype)
        # Labels are data, not learnable: stop_gradient keeps them out of backward.
        label = self.variable(
            name="label",
            shape=config.label_shape,
            dtype=config.label_dtype,
            stop_gradient=True)
        # reduction="none" returns per-example losses, matching the other frameworks.
        result = paddle.nn.functional.cross_entropy(
            input=input,
            label=label,
            weight=None,
            ignore_index=config.ignore_index,
            soft_label=config.soft_label,
            use_softmax=True,
            axis=config.axis,
            reduction="none")
        self.feed_list = [input, label]
        self.fetch_list = [result]
        if config.backward:
            self.append_gradients(result, [input])
@benchmark_registry.register("cross_entropy")
class TorchCrossEntropy(PytorchOpBenchmarkBase):
    """PyTorch graph builder for the cross_entropy benchmark."""
    def build_graph(self, config):
        """Build forward (and optionally backward) graph for torch cross_entropy."""
        input = self.variable(
            name="input", shape=config.input_shape, dtype=config.input_dtype)
        # Labels are targets, not parameters: excluded from the backward pass.
        label = self.variable(
            name='label',
            shape=config.label_shape,
            dtype=config.label_dtype,
            stop_gradient=True)
        # torch has no soft_label/axis params; the config disables this
        # variant for those cases (see CrossEntropyConfig.init_from_json).
        result = torch.nn.functional.cross_entropy(
            input=input,
            target=label,
            weight=None,
            ignore_index=config.ignore_index,
            reduction="none")
        self.feed_list = [input, label]
        self.fetch_list = [result]
        if config.backward:
            self.append_gradients(result, [input])
@benchmark_registry.register("cross_entropy")
class TFCrossEntropy(TensorflowOpBenchmarkBase):
    """TensorFlow graph builder for the cross_entropy benchmark."""
    def build_graph(self, config):
        """Build forward (and optionally backward) graph for TF softmax cross entropy."""
        input = self.variable(
            name='input', shape=config.input_shape, dtype=config.input_dtype)
        label = self.variable(
            name='label', shape=config.label_shape, dtype=config.label_dtype)
        # The v1 loss takes one-hot targets, so expand the integer labels.
        onehot_label = tf.one_hot(indices=label, depth=config.num_classes)
        # Bug fix: tf.compat.v1.losses.softmax_cross_entropy has no `input`
        # parameter -- logits must be passed via the `logits` keyword
        # (the original `input=input` raised a TypeError at graph build).
        # reduction='none' equals tf.compat.v1.losses.Reduction.NONE, giving
        # per-example losses like the Paddle/PyTorch variants.
        result = tf.compat.v1.losses.softmax_cross_entropy(
            onehot_labels=onehot_label, logits=input, reduction='none')
        self.feed_list = [input, label]
        self.fetch_list = [result]
        if config.backward:
            self.append_gradients(result, [input])
|
[
"noreply@github.com"
] |
PaddlePaddle.noreply@github.com
|
91f176c7a6e6def0b43003d56c17e90b754666b2
|
b59372692c912ba17ec2e6812983663a6deccdaf
|
/.history/bsServer/views_20200503165259.py
|
143f159593a5a9133cc1f224d21ef41dff7767ae
|
[] |
no_license
|
nanjigirl/bs-server-project
|
2d7c240ddf21983ed0439829a7995bde94082467
|
7863aed279b233d359c540c71fdd08ce8633976b
|
refs/heads/master
| 2022-08-02T17:33:48.201967
| 2020-05-25T15:18:34
| 2020-05-25T15:18:34
| 261,204,713
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
import json
from django.http import HttpResponse,JsonResponse
from .models import *
# Create your views here.
def index(request):
    """Return all books as JSON; fall back to an empty list on DB errors.

    Bug fix: the original used ``finally:``, which unconditionally overwrote
    the query result with the placeholder ``['id']`` on every request.
    """
    try:
        # .values() yields plain dicts, which JsonResponse can serialize;
        # a raw QuerySet of model instances is not JSON-serializable.
        booklist = list(BookInfo.objects.values())
    except Exception:
        # Best-effort fallback: return an empty result set instead of a 500.
        booklist = []
    return JsonResponse({"status": "200", "list": booklist, "msg": "query articles sucess."})
|
[
"chenxueb@yonyou.com"
] |
chenxueb@yonyou.com
|
d879b881b4190309369829a872ce622950608251
|
a9c43c4b1a640841f1c9b13b63e39422c4fc47c2
|
/test/tests/import_target.py
|
e7cf80e2bef7875d664ecdefab6405a10e10eaac
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0"
] |
permissive
|
lovejavaee/pyston
|
be5bd8393462be17259bcc40bf8f745e157d9793
|
e8f0d9667c35db043add2f07a0ea7d23e290dd80
|
refs/heads/master
| 2023-05-01T17:42:35.616499
| 2015-04-07T08:10:44
| 2015-04-07T08:10:44
| 33,535,295
| 0
| 0
|
NOASSERTION
| 2023-04-14T02:16:28
| 2015-04-07T09:53:36
|
Python
|
UTF-8
|
Python
| false
| false
| 260
|
py
|
# Python 2 test fixture for the import machinery: prints at import time,
# pulls in a nested module, and exposes a small assortment of module-level
# names (some deliberately excluded from `__all__`).
print "starting import of", __name__
import import_nested_target
x = 1
def foo():
    print "foo()"
# def k():
#     print x
# foo()
class C(object):
    pass
_x = 1  # underscore-prefixed: implicitly hidden from `from ... import *`
z = 2
__all__ = ['x', u'z']  # note: one str and one unicode entry, likely deliberate
def letMeCallThatForYou(f, *args):
    # Apply f to the given positional arguments (call-through helper).
    return f(*args)
|
[
"kmod@dropbox.com"
] |
kmod@dropbox.com
|
6722ed3d726a927b17bcea9c41278855f5335c1a
|
2455062787d67535da8be051ac5e361a097cf66f
|
/Producers/BSUB/TrigProd_amumu_a5_dR5/trigger_amumu_producer_cfg_TrigProd_amumu_a5_dR5_655.py
|
e4071b59278c64f6a03c66fc4e7ed1dde4ff1dfa
|
[] |
no_license
|
kmtos/BBA-RecoLevel
|
6e153c08d5ef579a42800f6c11995ee55eb54846
|
367adaa745fbdb43e875e5ce837c613d288738ab
|
refs/heads/master
| 2021-01-10T08:33:45.509687
| 2015-12-04T09:20:14
| 2015-12-04T09:20:14
| 43,355,189
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,360
|
py
|
# CMSSW configuration: runs PAT trigger matching on a RECO input file and
# writes a slimmed MiniAOD-like output with the matched trigger objects.
import FWCore.ParameterSet.Config as cms
process = cms.Process("PAT")
#process.load("BBA/Analyzer/bbaanalyzer_cfi")
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load('Configuration.EventContent.EventContent_cff')
process.load("Configuration.Geometry.GeometryRecoDB_cff")
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'MCRUN2_71_V1::All', '')
process.load("Configuration.StandardSequences.MagneticField_cff")
####################
#  Message Logger
####################
process.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(100)
process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
process.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
## switch to uncheduled mode
process.options.allowUnscheduled = cms.untracked.bool(True)
# NOTE(review): this second assignment overrides the -1 ("all events")
# setting above, so only 500 events are processed; the first assignment
# is dead code.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(500)
)
####################
#  Input File List
####################
# Input source
process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring('root://eoscms//eos/cms/store/user/ktos/RECO_Step3_amumu_a5/RECO_Step3_amumu_a5_655.root'),
    secondaryFileNames = cms.untracked.vstring()
)
############################################################
# Defining matching in DeltaR, sorting by best DeltaR
############################################################
process.mOniaTrigMatch = cms.EDProducer("PATTriggerMatcherDRLessByR",
    src     = cms.InputTag( 'slimmedMuons' ),
    matched = cms.InputTag( 'patTrigger' ),    # selections of trigger objects
    matchedCuts = cms.string( 'type( "TriggerMuon" ) && path( "HLT_Mu16_TkMu0_dEta18_Onia*")' ),  # input does not yet have the 'saveTags' parameter in HLT
    maxDPtRel   = cms.double( 0.5 ),   # no effect here
    maxDeltaR   = cms.double( 0.3 ),   #### selection of matches
    maxDeltaEta = cms.double( 0.2 ),   # no effect here
    resolveAmbiguities    = cms.bool( True ),  # definition of matcher output
    resolveByMatchQuality = cms.bool( True )   # definition of matcher output
)
# talk to output module
process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string("file:RECO_Step3_amumu_a5_TrigProd_655.root"),
    outputCommands = process.MINIAODSIMEventContent.outputCommands
)
# Keep only the slimmed/trigger collections needed downstream.
process.out.outputCommands += [ 'drop *_*_*_*',
                                'keep *_*slimmed*_*_*',
                                'keep *_pfTausEI_*_*',
                                'keep *_hpsPFTauProducer_*_*',
                                'keep *_hltTriggerSummaryAOD_*_*',
                                'keep *_TriggerResults_*_HLT',
                                'keep *_patTrigger*_*_*',
                                'keep *_prunedGenParticles_*_*',
                                'keep *_mOniaTrigMatch_*_*'
                                ]
################################################################################
#  Running the matching and setting the the trigger on
################################################################################
from PhysicsTools.PatAlgos.tools.trigTools import *
switchOnTrigger( process ) # This is optional and can be omitted.
switchOnTriggerMatching( process, triggerMatchers = [ 'mOniaTrigMatch'
                                                    ])
process.outpath = cms.EndPath(process.out)
|
[
"kmtos@ucdavis.edu"
] |
kmtos@ucdavis.edu
|
ea3b22f393e04ed27a6ffedc6c3aaa95258b18ab
|
3bda645720e87bba6c8f960bbc8750dcea974cb0
|
/data/phys/fill_6170/xangle_150/DoubleEG/input_files.py
|
985d3dcf0e70b9d93931efead2318476c82c0a8e
|
[] |
no_license
|
jan-kaspar/analysis_ctpps_alignment_2017_preTS2
|
0347b8f4f62cf6b82217935088ffb2250de28566
|
0920f99080a295c4e942aa53a2fe6697cdff0791
|
refs/heads/master
| 2021-05-10T16:56:47.887963
| 2018-01-31T09:28:18
| 2018-01-31T09:28:18
| 118,592,149
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 207
|
py
|
import FWCore.ParameterSet.Config as cms
# EOS location of the pre-TS2 alignment dataset for LHC fill 6170 at a
# crossing angle of 150 urad (DoubleEG stream).
input_files = cms.vstring(
    "root://eostotem.cern.ch//eos/totem/data/ctpps/reconstruction/2017/preTS2_alignment_data/version1/fill6170_xangle150_DoubleEG.root"
)
|
[
"jan.kaspar@cern.ch"
] |
jan.kaspar@cern.ch
|
abacba136794a358da8c60b230c31e0840e0247c
|
105212e4d2d2175d5105e05552e29b300375e039
|
/TensorFlow_tutorials/TensorFlow_train_own_data/tensorflow_train_owndata_detector/xml_to_csv.py
|
6919392daf578857478fc763bed6d969d0f08fc6
|
[] |
no_license
|
Asher-1/AI
|
84f0c42651c0b07e6b7e41ebb354258db64dd0d1
|
a70f63ebab3163f299f7f9d860a98695c0a3f7d5
|
refs/heads/master
| 2022-11-26T07:24:37.910301
| 2019-05-30T13:04:31
| 2019-05-30T13:04:31
| 160,031,310
| 7
| 1
| null | 2022-11-21T22:02:53
| 2018-12-02T09:19:03
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,775
|
py
|
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET
import random
# Output CSV paths for the generated train/validation label files.
train_csv_path = 'my_data/toy_train_labels.csv'
val_csv_path = 'my_data/toy_val_labels.csv'
# Fraction of examples assigned to the training split (rest -> validation).
train_val_rate = 0.7
def xml_to_csv(examples_list):
    """Parse Pascal-VOC style annotation XML files into a DataFrame.

    Args:
        examples_list: iterable of paths to annotation XML files.

    Returns:
        pandas.DataFrame with one row per <object> element and columns
        ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax'].
    """
    rows = []
    for xml_file in examples_list:
        root = ET.parse(xml_file).getroot()
        filename = root.find('filename').text
        # Robustness fix: look fields up by tag name instead of positional
        # child indices (`root.find('size')[0]`, `member[4][1]`...), so the
        # element order inside the XML does not matter.
        width = int(root.find('size/width').text)
        height = int(root.find('size/height').text)
        for member in root.findall('object'):
            bndbox = member.find('bndbox')
            rows.append((
                filename,
                width,
                height,
                member.find('name').text,
                int(bndbox.find('xmin').text),
                int(bndbox.find('ymin').text),
                int(bndbox.find('xmax').text),
                int(bndbox.find('ymax').text),
            ))
    column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax']
    return pd.DataFrame(rows, columns=column_name)
def main():
    """Split annotation XMLs into train/val sets and export label CSVs."""
    image_path = os.path.join(os.getcwd(), 'my_data', 'annotations')
    examples_list = glob.glob(image_path + '/*.xml')
    # Fixed seed -> deterministic shuffle, so the train/val split is reproducible.
    random.seed(42)
    random.shuffle(examples_list)
    num_examples = len(examples_list)
    num_train = int(train_val_rate * num_examples)
    train_examples_list = examples_list[:num_train]
    val_examples_list = examples_list[num_train:]
    # Convert the training split.
    xml_df = xml_to_csv(train_examples_list)
    xml_df.to_csv(train_csv_path, index=None)
    print('Successfully converted xml to %s.' % train_csv_path)
    # Convert the validation split.
    xml_df = xml_to_csv(val_examples_list)
    xml_df.to_csv(val_csv_path, index=None)
    print('Successfully converted xml to %s.' % val_csv_path)
if __name__ == '__main__':
    # Bug fix: guard the entry point so importing this module (e.g. to reuse
    # xml_to_csv) no longer triggers the conversion as an import side effect.
    main()
|
[
"ludahai19@163.com"
] |
ludahai19@163.com
|
bda86386777469912ab81722f4c03c92ed25e5d0
|
16cc8f796eac98e9a475da11e4bc0aa26317e894
|
/AOJ/ITP1_9_A.py
|
d688dbd021d385c3894b020d918e26b2bc588028
|
[] |
no_license
|
amaguri0408/AtCoder-python
|
2f3fcdd82c52f5ddee88627fb99466c9e003164f
|
ab8ec04b8e434939e9f7035f3a280b30c0682427
|
refs/heads/master
| 2022-10-30T00:07:03.560011
| 2020-06-13T10:41:36
| 2020-06-13T10:41:36
| 271,954,405
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 253
|
py
|
# AOJ ITP1-9-A: count occurrences of word W in a text, case-insensitively.
W = input()
T = []
# Collect whitespace-separated tokens until a line that consists of the
# single sentinel token "END_OF_TEXT".
while True:
    a = input().split()
    if a == ["END_OF_TEXT"]:
        break
    else:
        for i in a:
            T.append(i)
cnt = 0
for i in range(len(T)):
    # Only the text tokens are lowered before comparing; assumes W itself
    # is given in lowercase (per the problem statement) -- TODO confirm.
    if str.lower(T[i]) == W:
        cnt += 1
print(cnt)
|
[
"noreply@github.com"
] |
amaguri0408.noreply@github.com
|
b50fa8b164038273f0f2510a5fa6d6b70bf5e855
|
0206ac23a29673ee52c367b103dfe59e7733cdc1
|
/src/util/geo/misc.py
|
d49ea24c1dc86ad4e3c2233fae7cbe746f7c5725
|
[] |
no_license
|
guziy/RPN
|
2304a93f9ced626ae5fc8abfcc079e33159ae56a
|
71b94f4c73d4100345d29a6fbfa9fa108d8027b5
|
refs/heads/master
| 2021-11-27T07:18:22.705921
| 2021-11-27T00:54:03
| 2021-11-27T00:54:03
| 2,078,454
| 4
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 153
|
py
|
def deg_min_sec_to_deg(d, m, s, hem="N"):
    """Convert degrees/minutes/seconds to decimal degrees.

    `hem` is the hemisphere letter; "S" (south) and "W" (west) make the
    result negative, anything else leaves it positive.
    """
    sign = -1 if hem in ("S", "W") else 1
    magnitude = d + m / 60 + s / 3600
    return sign * magnitude
|
[
"guziy.sasha@gmail.com"
] |
guziy.sasha@gmail.com
|
a193f8c7e319471422ad1e32ddfd1539b1cf5509
|
0469f9c57df4081527c7c1447881b23543fcd4d7
|
/migrations/versions/e854ed68756a_init_commit.py
|
883c4de77a1f5a4a80a00820bfa4b994218bcc69
|
[] |
no_license
|
Fajaragst/open-vote-api
|
6585934977e5d0bc1c7d399b4212142c670a8380
|
011acf09ebd6493792d32bcb7410840ad97ca092
|
refs/heads/master
| 2023-03-24T20:55:45.751666
| 2019-07-21T14:11:12
| 2019-07-21T14:11:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,327
|
py
|
"""init commit
Revision ID: e854ed68756a
Revises:
Create Date: 2019-03-07 12:01:41.964280
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'e854ed68756a'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the election, candidate, user and vote tables.

    Tables are created parents-first so foreign-key targets exist:
    election -> candidate -> user -> vote.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('election',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('name', sa.String(length=144), nullable=True),
    sa.Column('images', sa.String(length=255), nullable=True),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('status', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_table('candidate',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('name', sa.String(length=144), nullable=True),
    sa.Column('description', sa.String(length=255), nullable=True),
    sa.Column('images', sa.String(length=255), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('status', sa.Integer(), nullable=True),
    sa.Column('election_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.ForeignKeyConstraint(['election_id'], ['election.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('username', sa.String(length=144), nullable=True),
    sa.Column('identity_id', sa.String(length=144), nullable=True),
    sa.Column('name', sa.String(length=144), nullable=True),
    sa.Column('msisdn', sa.String(length=12), nullable=True),
    sa.Column('email', sa.String(length=144), nullable=True),
    sa.Column('images', sa.String(length=144), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.Column('password', sa.String(length=128), nullable=True),
    sa.Column('status', sa.Integer(), nullable=True),
    sa.Column('role', sa.Integer(), nullable=True),
    sa.Column('candidate_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.ForeignKeyConstraint(['candidate_id'], ['candidate.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('id'),
    sa.UniqueConstraint('identity_id'),
    sa.UniqueConstraint('msisdn'),
    sa.UniqueConstraint('username')
    )
    op.create_table('vote',
    sa.Column('id', postgresql.UUID(as_uuid=True), nullable=False),
    sa.Column('candidate_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('user_id', postgresql.UUID(as_uuid=True), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['candidate_id'], ['candidate.id'], ),
    sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables in reverse dependency order (children first)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('vote')
    op.drop_table('user')
    op.drop_table('candidate')
    op.drop_table('election')
    # ### end Alembic commands ###
|
[
"kelvindsmn@gmail.com"
] |
kelvindsmn@gmail.com
|
59f9852b28460e6d5f9fbd669f9b8feed10e8782
|
396b3046c70a871b7fe5efe668c7cfc3d02cdc73
|
/EE-data-analysis/input_data_processing/takeout_valid_ids.py
|
baf3b2306612c31e6725fb4ddc5657911f0a2325
|
[] |
no_license
|
imbornagainer/Python_Project
|
01e22532c7504d40534ff6217e91a8f44c53ab17
|
08332af7341846bceb7b410a2f59d7f51bba7d70
|
refs/heads/master
| 2021-05-12T06:15:40.793504
| 2018-02-26T02:26:45
| 2018-02-26T02:26:45
| 117,215,058
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,898
|
py
|
# -*- coding: utf-8 -*-
# Author : jeonghoonkang , https://github.com/jeonghoonkang
# Author : jeongmoon417 , https://github.com/jeongmoon417
# 참고 url
# https://code.tutsplus.com/ko/tutorials/how-to-work-with-excel-documents-using-python--cms-25698
# http://egloos.zum.com/mcchae/v/11120944
import datetime
import openpyxl
import xlsxwriter
import sys
# sys.path.insert(0, '../doc_design')
# (to do) have to find how to add different location directory path and file
# now just using same dir location file
# from openpyxl.workbook import Workbook
# from openpyxl.writer.excel import ExcelWriter
# (error) from openpyxl.cell import get_column_letter
# from openpyxl import load_workbook
class excell_class :
    """Thin helper around openpyxl/xlsxwriter: reads a vertical cell range
    from a fixed workbook and writes lists back out to new Excel files."""
    __ofile = None  # cached workbook handle (currently unused by the methods)
    def __init__(self):
        pass
    #@staticmethod
    def open_exc_doc(self):
        # Open the fixed input workbook.
        # using unicode file name with u syntax
        __ofile = openpyxl.load_workbook(u"_test__1.xlsx")
        return __ofile
    def read_vertical(self, sheet, __start, __end):
        # Read a vertical cell range (e.g. 'b1'..'b541'), skipping empty cells.
        __vertical = []
        print " ... Please use column[n]:column[m], vertical read "
        cell_of_col = sheet[__start:__end]
        for row in cell_of_col:
            for cell in row:
                v = cell.value
                if v == None:
                    continue # do nothing below code, back to next for loop step
                __vertical.append(v) # append the id to the __vertical list
        return __vertical #__cnt, __cnt_n # (translated) vertical cell data, value count, None count
    # (translated) Save the given list to an Excel file, one value per row.
    def save_exc(self, __vdata, __fname):
        __t = str(datetime.datetime.now())
        workbook = xlsxwriter.Workbook(__fname + __t + '.xlsx')
        worksheet = workbook.add_worksheet()
        row = 0
        col = 0
        for item in (__vdata):
            worksheet.write(row, col, item)
            row += 1
        workbook.close()
# Dump the list to "<fname>.py" as a python assignment "fname=[...]" so it
# can be re-imported later.
def save_vdata(__vdata, fname):
    __t = str(datetime.datetime.now())  # NOTE(review): computed but never used
    __odata = fname + '='
    print str(__vdata)
    __odata = __odata + str(__vdata)
    filename = fname + '.py'
    __ofile = open(filename,"w")
    __ofile.write(__odata)
    __ofile.close()
# (Translated) Smart-meter IDs collected by EE are supposed to contain two
# '-' characters; this function checks for the cases with only one.
# NOTE(review): the prose above contradicts the examples and the code: the
# code flags IDs whose first '-' is NOT at index 2, or that carry a second
# '-' at index 5 -- i.e. single-dash "00-250060021" is treated as proper.
# (Translated) Typically fine ID: 00-250060021 (most are in this form)
# (Translated) Odd-looking IDs:   06-25-0071186 and the like
def check_id(__buf):
    __itter = len(__buf) # length of the input list
    __err_list = [] # buffer for the flagged IDs to return
    for __i in range(__itter) :
        print " check it is OK ? " + __buf[__i]
        try:
            if __buf[__i].index('-',0) != 2 : # enter when '-' is not at index 2
                # print the odd-looking ID and add it to the list
                print __buf[__i]
                __err_list.append(__buf[__i])
            # ('-' is at index 2, decided by the previous if) and
            # additionally a '-' exists at index 5: also flagged
            elif __buf[__i].index('-',5) == 5 :
                # print the odd-looking ID and add it to the list
                print __buf[__i]
                __err_list.append(__buf[__i])
        except:
            # str.index raised ValueError: no further '-' found, so the ID
            # is considered properly formatted. NOTE(review): an ID with no
            # dash at all also lands here and is reported as proper.
            print" Proper ID format"# __buf[__i]
            #__err_list.append(__buf[__i])
    return __err_list
if __name__ == "__main__":
    # open excell file
    eclass = excell_class()
    op = eclass.open_exc_doc()
    sheets = op.get_sheet_names()
    print " sheets = ", sheets
    sh1 = op.get_sheet_by_name(sheets[0])
    print " name =", sh1
    # Read the IDs from column B (rows 1..541) of the first sheet.
    buf = eclass.read_vertical(sh1,'b1','b541')
    # Persist the IDs both as a python module and as a new Excel file.
    save_vdata(buf,"result_ids")
    eclass.save_exc(buf,"result_ids")
    exit (" ...congrats, finish")
|
[
"BePious@DESKTOP-MT17J3L"
] |
BePious@DESKTOP-MT17J3L
|
d012c815cc66d570ca728e875aa1f6c642c7eb59
|
703a60185bb6d7607d3bff5afda2bbadfa96229c
|
/contabilidade/wsgi.py
|
2534c73dc9bf771dcbebbf2e302e272d920c203a
|
[] |
no_license
|
Eduardo-Lucas/contabilidade
|
07b07f63272e564034fcd418310222a78b28c3c1
|
c42d1ee2178d70a7301b57fe68329e732621abed
|
refs/heads/master
| 2021-09-07T23:47:16.142421
| 2018-03-03T14:09:17
| 2018-03-03T14:09:17
| 102,148,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
"""
WSGI config for contabilidade project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "contabilidade.settings")
application = get_wsgi_application()
|
[
"eduardolucas40@gmail.com"
] |
eduardolucas40@gmail.com
|
aac39ead1e326cd55a04c437ae750dd7b38ff439
|
6359831db732f929409adbb8270092c7e9cca8d5
|
/Q003_sub-array_with_maximum_sum.py
|
dea797f01d26dff8ae4cc1146a6a658e5c09d456
|
[] |
no_license
|
latika18/interviewbit
|
11237219d982c98a22f0098be8248ef7a5b9246f
|
a065b19dc368136101dafbbbdab9b664fed0bf35
|
refs/heads/master
| 2020-03-15T15:20:30.002201
| 2018-08-22T07:39:21
| 2018-08-22T07:39:21
| 132,209,831
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 828
|
py
|
def maxSubArray(A):
    """Kadane's algorithm: return (max_sum, subarray) for the maximum-sum
    contiguous subarray of the non-empty list A.

    Bug fix: the original returned a wrong (often empty) slice because
    `stop` was set to `i` instead of `i + 1` (Python slices exclude the
    end index), and the initial bounds (0, 0) produced an empty slice
    whenever the best subarray was just A[0] (e.g. all-negative input).
    The returned max_sum itself was already correct.
    """
    start, stop = 0, 1           # best window is A[start:stop], initially [A[0]]
    curr = 0                     # start index of the window currently summed
    max_sum = A[0]
    current_sum = 0
    for i in range(len(A)):
        current_sum += A[i]
        if max_sum < current_sum:
            max_sum = current_sum
            start = curr
            stop = i + 1         # slice end is exclusive
        if current_sum < 0:
            # A negative running sum can never help; restart after i.
            current_sum = 0
            curr = i + 1
    return max_sum, A[start:stop]
print maxSubArray([-2,-3,-4,-5])
print maxSubArray([-1])
print maxSubArray([-5, 1, -3, 7, -1, 2, 1, -4, 6])
print maxSubArray([-5, 1, -3, 7, -1, 2, 1, -6, 5])
print maxSubArray( [6, -3, -2, 7, -5, 2, 1, -7, 6])
print maxSubArray([-5, -2, -1, -4, -7])
print maxSubArray( [4, 1, 1, 4, -4, 10, -4, 10, 3, -3, -9, -8, 2, -6, -6, -5, -1, -7, 7, 8])
print maxSubArray([4, -5, -1, 0, -2, 20, -4, -3, -2, 8, -1, 10, -1, -1 ])
|
[
"noreply@github.com"
] |
latika18.noreply@github.com
|
cdab5fe6af763d9086bf84c60fb5baee3d83ced5
|
9efa07e8b0d63fc107124a8387fbe29cfc44ced9
|
/konkord/apps/users/migrations/0002_auto_20170124_1003.py
|
7e28bb774084e7e88638b6a2ba356d0f63c6a354
|
[] |
no_license
|
phonxis/konkord
|
ef7be9325bddeaceb28f1a70c094d3c7a8ea6b9b
|
cd564a41f1b0ac0665a7dee0e3f26730c7377154
|
refs/heads/master
| 2021-07-20T01:33:42.221782
| 2017-10-26T14:02:48
| 2017-10-26T14:02:48
| 105,538,403
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-01-24 10:03
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import users.models
class Migration(migrations.Migration):
    """Switch the user model to a custom manager and tighten email/phone FKs."""
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelManagers(
            name='user',
            managers=[
                ('objects', users.models.UserManager()),
            ],
        ),
        migrations.AlterField(
            model_name='email',
            name='email',
            field=models.CharField(max_length=255, verbose_name='Email'),
        ),
        # Deleting a user cascades to their emails/phones via related_name.
        migrations.AlterField(
            model_name='email',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='emails', to=settings.AUTH_USER_MODEL, verbose_name='User'),
        ),
        migrations.AlterField(
            model_name='phone',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='phones', to=settings.AUTH_USER_MODEL, verbose_name='User'),
        ),
    ]
|
[
"sadjuk17@gmail.com"
] |
sadjuk17@gmail.com
|
8f99e0a8b26cfb8845d447c795dc5d253ea25304
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/25/usersdata/132/11887/submittedfiles/av1_3.py
|
611ca6bc5565b10c48ce258b48df5fbbec519a95
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
py
|
# -*- coding: utf-8 -*-
from __future__ import division
import math
a = input('digite o valor de a:')
b = input('digite o valor de b:')
i = a
x = b
c = 0
while True:
    if i % x != 0:
        c = c + 1
    # Bug fix: the original read `if i%x=0:` -- assignment in a condition,
    # which is a SyntaxError; comparison requires `==`.
    if i % x == 0:
        break
    # NOTE(review): after `x = i % x` the next line makes i == x, so the
    # loop always terminates on the following iteration; this may not be
    # the Euclidean step the author intended -- confirm against the spec.
    x = i % x
    i = x
print(c)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
578f222d31e790f108a9e57c8d6fddaccdbd549e
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/nouns/_mandible.py
|
72cd876319bde71fb9a98f2eeed130011b2c8db0
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 408
|
py
|
#calss header
class _MANDIBLE():
def __init__(self,):
self.name = "MANDIBLE"
self.definitions = [u'in a person or animal, the lower jaw bone', u'in insects, one of the two parts of the mouth used for biting and cutting food']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
f8a4a253473bf3585a96fcfad8f0c972ee8a9423
|
d72af5ad2b8f42b4faff296df0342dc3fbbd91ac
|
/tensorflow_estimator/python/estimator/head/base_head_test.py
|
bde6c898195caa1e958aefd6f2ac23a942e02548
|
[
"Apache-2.0"
] |
permissive
|
CheukNgai/estimator
|
22eacf5b0a366d43206e441c9d0cbb096ab12614
|
673a50bd5ffa70d0672ce47e40f5075f1cbe0a62
|
refs/heads/master
| 2020-04-01T23:09:12.173802
| 2018-11-13T10:19:22
| 2018-11-13T10:19:22
| 153,744,529
| 0
| 0
|
Apache-2.0
| 2018-11-13T10:19:23
| 2018-10-19T07:38:19
|
Python
|
UTF-8
|
Python
| false
| false
| 6,502
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for base_head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.core.framework import summary_pb2
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
from tensorflow.python.saved_model import signature_constants
from tensorflow_estimator.python.estimator import model_fn
from tensorflow_estimator.python.estimator.head import base_head
_DEFAULT_SERVING_KEY = signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
def _initialize_variables(test_case, scaffold):
    """Finalize `scaffold` and run its init/ready ops in the current session.

    Asserts that no custom init hooks were configured (init_feed_dict,
    init_fn) and that finalization created a saver.
    """
    scaffold.finalize()
    test_case.assertIsNone(scaffold.init_feed_dict)
    test_case.assertIsNone(scaffold.init_fn)
    scaffold.init_op.run()
    scaffold.ready_for_local_init_op.eval()
    scaffold.local_init_op.run()
    scaffold.ready_op.eval()
    test_case.assertIsNotNone(scaffold.saver)
def _assert_simple_summaries(test_case, expected_summaries, summary_str,
                             tol=1e-6):
    """Assert that the serialized summary contains the expected simple values.

    Args:
      test_case: test case.
      expected_summaries: Dict of expected tags and simple values.
      summary_str: Serialized `summary_pb2.Summary`.
      tol: Relative and absolute tolerance for the value comparison.
    """
    summary = summary_pb2.Summary()
    summary.ParseFromString(summary_str)
    # Compare {tag: simple_value} extracted from the proto against expectations.
    test_case.assertAllClose(expected_summaries, {
        v.tag: v.simple_value for v in summary.value
    }, rtol=tol, atol=tol)
def _assert_no_hooks(test_case, spec):
    """Assert that the spec carries neither chief nor regular training hooks."""
    test_case.assertAllEqual([], spec.training_chief_hooks)
    test_case.assertAllEqual([], spec.training_hooks)
class CreateEstimatorSpecTest(test.TestCase):
  """Checks how Head subclasses dispatch between TPU and non-TPU spec paths."""

  class _HeadWithTPUSupport(base_head.Head):
    """Head that overrides _create_tpu_estimator_spec."""

    # The abstract-member stubs below return placeholder values; only the
    # estimator-spec override is exercised by these tests.
    def name(self):
      return 'HeadWithTPUSupport'

    def logits_dimension(self):
      return None

    def loss_reduction(self):
      return None

    def loss(self, features, mode, logits, labels):
      return None

    def predictions(self, logits):
      return None

    def metrics(self, regularization_losses=None):
      return None

    def update_metrics(self, eval_metrics, features, logits, labels,
                       mode=None, regularization_losses=None):
      return None

    def _create_tpu_estimator_spec(self, features, mode, logits, labels=None,
                                   optimizer=None, train_op_fn=None,
                                   regularization_losses=None):
      # Minimal EVAL-mode TPU spec with a constant loss.
      return model_fn._TPUEstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          loss=constant_op.constant(0.0, dtype=dtypes.float32))

  class _HeadWithOutTPUSupport(base_head.Head):
    """Head that overrides create_estimator_spec."""

    # Same stub pattern as above; only the non-TPU override is provided.
    def name(self):
      return 'HeadWithOutTPUSupport'

    def logits_dimension(self):
      return None

    def loss_reduction(self):
      return None

    def loss(self, features, mode, logits, labels):
      return None

    def predictions(self, logits):
      return None

    def metrics(self, regularization_losses=None):
      return None

    def update_metrics(self, eval_metrics, features, logits, labels,
                       mode=None, regularization_losses=None):
      return None

    def create_estimator_spec(self, features, mode, logits, labels=None,
                              optimizer=None, train_op_fn=None,
                              regularization_losses=None):
      # Minimal EVAL-mode (non-TPU) spec with a constant loss.
      return model_fn.EstimatorSpec(
          mode=model_fn.ModeKeys.EVAL,
          loss=constant_op.constant(0.0, dtype=dtypes.float32))

  class _InvalidHead(base_head.Head):
    """Head that overrides neither estimator_spec functions."""

    def name(self):
      return 'InvalidHead'

    def logits_dimension(self):
      return None

    def loss_reduction(self):
      return None

    def loss(self, features, mode, logits, labels):
      return None

    def predictions(self, logits):
      return None

    def metrics(self, regularization_losses=None):
      return None

    def update_metrics(self, eval_metrics, features, logits, labels,
                       mode=None, regularization_losses=None):
      return None

  def test_head_override_tpu_estimator_spec(self):
    """Test for `_Head` that overrides _create_tpu_estimator_spec."""
    head = self._HeadWithTPUSupport()
    tpu_spec = head._create_tpu_estimator_spec(
        features=None, mode=None, logits=None)
    self.assertTrue(isinstance(tpu_spec, model_fn._TPUEstimatorSpec))
    # The base class derives a regular EstimatorSpec from the TPU override.
    est_spec = head.create_estimator_spec(
        features=None, mode=None, logits=None)
    self.assertTrue(isinstance(est_spec, model_fn.EstimatorSpec))

  def test_head_override_estimator_spec(self):
    """Test for `_Head` that overrides create_estimator_spec."""
    head = self._HeadWithOutTPUSupport()
    # Without a TPU override, requesting the TPU spec must fail loudly...
    with self.assertRaisesRegexp(
        NotImplementedError,
        'TPUEstimatorSpec not available for this model head.'):
      _ = head._create_tpu_estimator_spec(
          features=None, mode=None, logits=None)
    # ...but the plain estimator spec still works.
    est_spec = head.create_estimator_spec(
        features=None, mode=None, logits=None)
    self.assertTrue(isinstance(est_spec, model_fn.EstimatorSpec))

  def test_invalid_head_class(self):
    # A head overriding neither spec factory raises from both entry points.
    head = self._InvalidHead()
    with self.assertRaisesRegexp(
        NotImplementedError,
        'TPUEstimatorSpec not available for this model head.'):
      _ = head._create_tpu_estimator_spec(
          features=None, mode=None, logits=None)
    with self.assertRaisesRegexp(
        NotImplementedError,
        r'Subclasses of Head must implement `create_estimator_spec\(\)` or '
        r'_create_tpu_estimator_spec\(\).'):
      _ = head.create_estimator_spec(
          features=None, mode=None, logits=None)
if __name__ == '__main__':
  # Run the suite via the TensorFlow test runner.
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
afcbab42b80184c81c6eedbcb945ba989acb12dc
|
5736fa4213981815bce5b2527b1db69b9405a9e3
|
/tools/pe-clr.permissions.py
|
aa15c2711586ec54bd9104ab85c43ea5ab66c656
|
[
"BSD-2-Clause"
] |
permissive
|
mmg1/syringe-1
|
a4f399f9eaf31fb784a34715aeb8dfb230c56920
|
4c38701756504de32282e95d6c4d76eb194fa46f
|
refs/heads/master
| 2020-03-28T09:04:20.664636
| 2018-07-28T03:46:10
| 2018-07-28T03:46:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,951
|
py
|
import logging, time
#logging.root.setLevel(logging.INFO)
import pecoff, ptypes
from ptypes import pstruct, dyn
from pecoff.portable import clr
class CustomField(pstruct.type):
    # One serialized CLR custom-attribute named argument: the FieldOrProp
    # marker, the element type of the value, the UTF-8 name, and a value
    # decoded according to FieldOrPropType.
    _fields_ = [
        (clr.ELEMENT_TYPE, 'FieldOrProp'),
        (clr.ELEMENT_TYPE, 'FieldOrPropType'),
        (clr.SerString, 'FieldOrPropName'),
        # Value's concrete ptype is resolved lazily from the type tag.
        (lambda s: clr.ElementType.lookup(s['FieldOrPropType'].li.int()), 'Value'),
    ]
class Fields(pstruct.type):
    # Compressed-int count followed by that many CustomField records.
    _fields_ = [
        (clr.CInt, 'Count'),
        (lambda s: dyn.array(CustomField, s['Count'].li.Get()), 'Fields'),
    ]
def log(stdout):
    # Coroutine-style logger (Python 2): prime with next(), then send()
    # messages. Each message is prefixed with the seconds elapsed since the
    # generator was started.
    start = ts = time.time()
    while True:
        message = (yield)
        ts = time.time()
        print >>stdout, "{:.3f} : {:s}".format(ts - start, message)
    return
def strify(value):
    # Render a value for display: decimal for ints/longs, plain text for
    # strings, repr() for anything else. (Python 2 only: long, basestring.)
    if isinstance(value, (int, long)):
        return "{:d}".format(value)
    elif isinstance(value, basestring):
        return "{:s}".format(value)
    return "{!r}".format(value)
if __name__ == '__main__':
    import sys, os
    import ptypes, pecoff

    # Usage: script <pe-file>. Exit 1 on bad args, 2 when the image has no
    # CLR header.
    if len(sys.argv) != 2:
        print >>sys.stderr, "Usage: {:s} file".format(sys.argv[0] if len(sys.argv) else 'test')
        sys.exit(1)
    filename = sys.argv[1]

    # Timestamped logger coroutine writing to stderr (primed with next()).
    L = log(sys.stderr); next(L)

    ptypes.setsource(ptypes.prov.file(filename, mode='r'))

    L.send("Loading executable for {:s}".format(os.path.basename(filename)))
    z = pecoff.Executable.File()
    z = z.l

    # Data directory entry 14 is the CLR (COM descriptor) header.
    dd = z['next']['header']['datadirectory'][14]
    if dd['address'].int() == 0:
        # NOTE(review): the .format() argument below is unused by the literal.
        L.send("No IMAGE_COR20_HEADER found in executable!".format(os.path.basename(filename)))
        sys.exit(2)
    comdd = dd['address'].d.l

    # Locate the CLR metadata streams by name.
    meta = comdd['MetaData']['Address'].d.l
    strings = meta['StreamHeaders'].Get('#Strings')['Offset'].d
    #userstrings = meta['StreamHeaders'].Get('#US')['Offset'].d
    guids = meta['StreamHeaders'].Get('#GUID')['Offset'].d
    blobs = meta['StreamHeaders'].Get('#Blob')['Offset'].d
    htables = meta['StreamHeaders'].Get('#~')['Offset'].d

    # Load each heap eagerly and log timings.
    ts = time.time()
    L.send("Loading heap \"{:s}\"".format('#~'))
    htables.l
    L.send("Loading heap \"{:s}\"".format('#Strings'))
    strings.l
    L.send("Loading heap \"{:s}\"".format('#GUID'))
    guids.l
    L.send("Loading heap \"{:s}\"".format('#Blob'))
    blobs.l
    L.send("Finished loading heaps in {:.3f}".format(time.time()-ts))

    tables = htables['tables']

    # output modules
    L.send("Enumerating {:d} modules.".format(len(tables['Module'])))
    modules = []
    for i, m in enumerate(tables['Module']):
        res = strings.field(m['Name'].int())
        if m['Mvid'].int():
            g = guids.Get(m['Mvid'].int())
            print >>sys.stdout, '{:s} {:s}'.format(res.str(), g.str())
            modules.append((res.str(), g))
        else:
            print >>sys.stdout, '{:s}'.format(res.str())

    # collect assemblies
    L.send("Enumerating {:d} assemblies.".format(len(tables['Assembly'])))
    assembly = {}
    for i, a in enumerate(tables['Assembly']):
        res = strings.field(a['Name'].int())
        # DeclSecurity Parent rows index assemblies 1-based.
        assembly[i+1] = res.str()

    # for each permission that points to an Assembly
    perms = ((p['Parent'].Index(), p['Action'].str(), p['PermissionSet'].int()) for p in tables['DeclSecurity'] if p['Parent']['Tag'].Get() == 'Assembly')

    L.send("Listing properties from each permission.")
    for mi, ma, bi in perms:
        permset = blobs.field(bi)['data'].cast(clr.PermissionSet)
        attributes = []
        for attr in permset['attributes']:
            # Decode the attribute's named-argument blob into a plain dict.
            props = attr['Properties']['data'].cast(Fields)
            res = {}
            for f in props['Fields']:
                res[ f['FieldOrPropName'].str() ] = f['Value'].get()
            attributes.append(res)
        # Merge all attribute dicts (later attributes win on key collision).
        res = {}
        map(res.update, attributes)
        print >>sys.stdout, '\t{:s} : {:s} : {:s}'.format(assembly[mi], ma, ', '.join('{:s}={:s}'.format(k, strify(v)) for k, v in res.viewitems()))
|
[
"arizvisa@gmail.com"
] |
arizvisa@gmail.com
|
0969d72a4236cb5feccfde67e764dd2271af3c61
|
3d19e1a316de4d6d96471c64332fff7acfaf1308
|
/Users/J/Julian_Todd/python-png-header-test.py
|
36dbf301d83f5b219c9b36fed197cfeb24851ab4
|
[] |
no_license
|
BerilBBJ/scraperwiki-scraper-vault
|
4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc
|
65ea6a943cc348a9caf3782b900b36446f7e137d
|
refs/heads/master
| 2021-12-02T23:55:58.481210
| 2013-09-30T17:02:59
| 2013-09-30T17:02:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,316
|
py
|
# Fetch the same PNG and dump it to the console as base64, thirteen times --
# exactly the sequence of operations the original copy-pasted script performed.
import scraperwiki
import urllib
import base64

for _ in range(13):
    scraperwiki.utils.httpresponseheader("Content-Type", "image/PNG")
    url = 'http://scraperwiki.com/media/images/intro_slides/intro4.png'
    pngbin = urllib.urlopen(url).read()
    scraperwiki.dumpMessage({"content":base64.encodestring(pngbin), "message_type":"console", "encoding":"base64"})
|
[
"pallih@kaninka.net"
] |
pallih@kaninka.net
|
abe0ed484c1bcfa8535da2651e1123f7e700260f
|
0178e6a705ee8aa6bb0b0a8512bf5184a9d00ded
|
/Sungjin/Math/n11050/11050.py
|
5428a370db68b311085325f58386ff3cee0933cd
|
[] |
no_license
|
comojin1994/Algorithm_Study
|
0379d513abf30e3f55d6a013e90329bfdfa5adcc
|
965c97a9b858565c68ac029f852a1c2218369e0b
|
refs/heads/master
| 2021-08-08T14:55:15.220412
| 2021-07-06T11:54:33
| 2021-07-06T11:54:33
| 206,978,984
| 0
| 1
| null | 2020-05-14T14:06:46
| 2019-09-07T14:23:31
|
Python
|
UTF-8
|
Python
| false
| false
| 340
|
py
|
import sys
# NOTE(review): rebinding the builtin name `input` to readline is deliberate
# here (competitive-programming fast input) but shadows the builtin.
input = sys.stdin.readline

# Memo table for factorials, pre-seeded with small values.
fac = {0:1, 1:1, 2:2, 3:6}

# Read "N K" from stdin.
N, K = map(int, input().strip().split())

def facto(n):
    # Memoized factorial: reuse cached results, cache new ones on unwind.
    if n in fac.keys():
        return fac[n]
    result = n * facto(n-1)
    fac[n] = result
    return result

# Warm the cache up to 10! before answering the query.
facto(10)

def com(n,k):
    # Binomial coefficient C(n, k) via factorials (integer division).
    result = facto(n)//(facto(k)*facto(n-k))
    return result

print(com(N,K))
|
[
"comojin1994@gmail.com"
] |
comojin1994@gmail.com
|
8647c5a51442aadd068dcd58c6a1fb7470e8818a
|
80b7f2a10506f70477d8720e229d7530da2eff5d
|
/uhd_restpy/testplatform/sessions/ixnetwork/globals/protocolstack/eapoudpglobals/nacsettings/nactlv/apptyperef/apptyperef.py
|
43ad9c9e986be6cfddf60fcd98fab1edd7c4fb48
|
[
"MIT"
] |
permissive
|
OpenIxia/ixnetwork_restpy
|
00fdc305901aa7e4b26e4000b133655e2d0e346a
|
c8ecc779421bffbc27c906c1ea51af3756d83398
|
refs/heads/master
| 2023-08-10T02:21:38.207252
| 2023-07-19T14:14:57
| 2023-07-19T14:14:57
| 174,170,555
| 26
| 16
|
MIT
| 2023-02-02T07:02:43
| 2019-03-06T15:27:20
|
Python
|
UTF-8
|
Python
| false
| false
| 5,524
|
py
|
# MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
from uhd_restpy.base import Base
from uhd_restpy.files import Files
if sys.version_info >= (3, 5):
from typing import List, Any, Union
class AppTypeRef(Base):
    """TLV Application Type

    The AppTypeRef class encapsulates a required appTypeRef resource which will be retrieved from the server every time the property is accessed.
    """

    __slots__ = ()
    _SDM_NAME = 'appTypeRef'
    # Maps Python-facing attribute names to server-side SDM attribute names.
    _SDM_ATT_MAP = {
        'Name': 'name',
        'ObjectId': 'objectId',
        'Value': 'value',
    }
    _SDM_ENUM_MAP = {
    }

    def __init__(self, parent, list_op=False):
        super(AppTypeRef, self).__init__(parent, list_op)

    @property
    def NacApps(self):
        """
        Returns
        -------
        - obj(uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.eapoudpglobals.nacsettings.nactlv.apptyperef.nacapps.nacapps.NacApps): An instance of the NacApps class

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        # Import locally to avoid a module-level import cycle in generated code.
        from uhd_restpy.testplatform.sessions.ixnetwork.globals.protocolstack.eapoudpglobals.nacsettings.nactlv.apptyperef.nacapps.nacapps import NacApps
        # Reuse a cached child instance when one has already been materialized.
        if len(self._object_properties) > 0:
            if self._properties.get('NacApps', None) is not None:
                return self._properties.get('NacApps')
        return NacApps(self)

    @property
    def Name(self):
        # type: () -> str
        """
        Returns
        -------
        - str: AppType Name.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Name'])

    @Name.setter
    def Name(self, value):
        # type: (str) -> None
        self._set_attribute(self._SDM_ATT_MAP['Name'], value)

    @property
    def ObjectId(self):
        # type: () -> str
        """
        Returns
        -------
        - str: Unique identifier for this object
        """
        return self._get_attribute(self._SDM_ATT_MAP['ObjectId'])

    @property
    def Value(self):
        # type: () -> int
        """
        Returns
        -------
        - number: AppType ID.
        """
        return self._get_attribute(self._SDM_ATT_MAP['Value'])

    @Value.setter
    def Value(self, value):
        # type: (int) -> None
        self._set_attribute(self._SDM_ATT_MAP['Value'], value)

    def update(self, Name=None, Value=None):
        # type: (str, int) -> AppTypeRef
        """Updates appTypeRef resource on the server.

        Args
        ----
        - Name (str): AppType Name.
        - Value (number): AppType ID.

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))

    def find(self, Name=None, ObjectId=None, Value=None):
        # type: (str, str, int) -> AppTypeRef
        """Finds and retrieves appTypeRef resources from the server.

        All named parameters are evaluated on the server using regex. The named parameters can be used to selectively retrieve appTypeRef resources from the server.
        To retrieve an exact match ensure the parameter value starts with ^ and ends with $
        By default the find method takes no parameters and will retrieve all appTypeRef resources from the server.

        Args
        ----
        - Name (str): AppType Name.
        - ObjectId (str): Unique identifier for this object
        - Value (number): AppType ID.

        Returns
        -------
        - self: This instance with matching appTypeRef resources retrieved from the server available through an iterator or index

        Raises
        ------
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._select(self._map_locals(self._SDM_ATT_MAP, locals()))

    def read(self, href):
        """Retrieves a single instance of appTypeRef data from the server.

        Args
        ----
        - href (str): An href to the instance to be retrieved

        Returns
        -------
        - self: This instance with the appTypeRef resources from the server available through an iterator or index

        Raises
        ------
        - NotFoundError: The requested resource does not exist on the server
        - ServerError: The server has encountered an uncategorized error condition
        """
        return self._read(href)
|
[
"andy.balogh@keysight.com"
] |
andy.balogh@keysight.com
|
aa9f33b52226fb2e8f1ef81ab0dddd5b904536b7
|
24bc6f0a0a7b4a04c3289fe96368de81a7bd5191
|
/scdaily.py
|
7a1704c6456a06aceecf2de6e7a99f2e2d082a8d
|
[] |
no_license
|
xiaol/foreign_news_crawler
|
b0b39a22f498a21d303664715a33645306ac28cb
|
f7471a5e6f884408a6fe451dfe293bf92b61db3c
|
refs/heads/master
| 2021-03-24T10:18:33.989684
| 2016-03-21T06:48:30
| 2016-03-21T06:48:30
| 40,290,260
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,972
|
py
|
# -*- coding: utf-8 -*-
# 繁体
from crawler_framework.page import get_page
from crawler_framework.Logger import INFO, DBG, ERR
from lxml import etree
from StringIO import StringIO
import traceback
import redis
import time
from Cleaners.langconv import *
r = redis.StrictRedis(host='localhost', port=6379)
source = u"美南新闻"
def scdaily_crawler(url):
    # Fetch a listing page, walk each story link, convert the traditional-
    # Chinese title to simplified, scrape the article, and queue it in redis.
    text = get_page(url)
    parser = etree.HTMLParser()
    tree = etree.parse(StringIO(text), parser)
    story_links = tree.xpath('.//td[@class="kuang2"]//a')
    for story_link in story_links:
        try:
            story_text_link = "http://www.scdaily.com/" + story_link.get("href")
        except:
            continue
        try:
            # The redis set 'duplicates' acts as a seen-URL filter.
            if r.sismember('duplicates', story_text_link) == True:
                continue
            story_title = story_link.text.strip()
            story_title = Converter('zh-hans').convert(story_title)
            story_info = get_text(story_text_link, story_title)
            story_text = story_info['content']
            # Skip stories whose body came back empty.
            if len(story_text) == 0:
                continue
            r.sadd('duplicates', story_text_link)
            r.rpush('stories', story_info)
        except:
            print traceback.format_exc(),url
            pass
def get_text(url, story_title):
    # Scrape one article page: collect paragraph text (converted to simplified
    # Chinese), trailing text after <br>, and image URLs, in document order.
    text = get_page(url)
    parser = etree.HTMLParser()
    tree = etree.parse(StringIO(text), parser)
    create_time = time.strftime('%Y-%m-%d %H:%M:%S')
    story_text = []
    count = 0
    imgnum = 0
    for x in tree.find('.//td[@align="center"]').iter():
        try:
            if x.tag == "p":
                t = x.text.strip()
                t = Converter('zh-hans').convert(t)
                if len(t) != 0:
                    # NOTE(review): `dict` shadows the builtin; each entry is
                    # {"<index>": {"txt": ...}} appended in reading order.
                    dict = {}
                    dict[str(count)] = {}
                    dict[str(count)]["txt"] = t
                    count += 1
                    story_text.append(dict)
            if x.tag == "br":
                t = x.tail.strip()
                if len(t) != 0:
                    dict = {}
                    dict[str(count)] = {}
                    dict[str(count)]["txt"] = t
                    count += 1
                    story_text.append(dict)
            if x.tag == "img":
                dict = {}
                dict[str(count)] = {}
                dict[str(count)]["img"] = x.get("src")
                count += 1
                story_text.append(dict)
                imgnum += 1
        except:
            # Elements with missing .text/.tail raise on .strip(); skip them.
            pass
    story_info = {
        'content': story_text,
        'source': source,
        'title': story_title,
        'url': url,
        'create_time': create_time,
        'imgnum': imgnum,
        'source_url': url,
        'sourceSiteName': source
    }
    return story_info
if __name__ == "__main__":
    # Crawl two listing categories (Cid=28 and Cid=34) of the scdaily site.
    scdaily_crawler(url="http://www.scdaily.com/Newslist_more.aspx?Bid=48&Cid=28")
    scdaily_crawler(url="http://www.scdaily.com/Newslist_more.aspx?Bid=48&Cid=34")
|
[
"zuoyuantc@126.com"
] |
zuoyuantc@126.com
|
b3e9fc513baf6b7485ab74e36b4b174fc32fa36e
|
b22b0760b29d24cff24eda9d1c114094fd1a588f
|
/Python/Easy/1160. Find Words That Can Be Formed by Characters.py
|
eda11a25b8d2c586204944db2b82eddef85448f7
|
[] |
no_license
|
MridulGangwar/Leetcode-Solutions
|
bbbaa06058a7b3e7621fc54050e344c06a256080
|
d41b1bbd762030733fa271316f19724d43072cd7
|
refs/heads/master
| 2022-03-07T12:20:33.485573
| 2022-02-21T07:22:38
| 2022-02-21T07:22:38
| 231,700,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
class Solution(object):
    """LeetCode 1160: sum the lengths of words buildable from `chars`."""

    def create_dic(self, char):
        """Return a character -> occurrence-count mapping for the iterable."""
        counts = {}
        for ch in char:
            counts[ch] = counts.get(ch, 0) + 1
        return counts

    def countCharacters(self, words, chars):
        """
        :type words: List[str]
        :type chars: str
        :rtype: int
        """
        available = self.create_dic(chars)
        total = 0
        for word in words:
            need = self.create_dic(word)
            # A word is "good" when every required character is available
            # in at least the required quantity.
            if all(available.get(ch, 0) >= cnt for ch, cnt in need.items()):
                total += len(word)
        return total
|
[
"singhmridul1@gmail.com"
] |
singhmridul1@gmail.com
|
a14fe8bbbf3d0059b982760ec9e6f20dc9de0cd6
|
33dba187e9fd855534c466268012f5589eca9e5d
|
/pleiades/bulkup/__init__.py
|
fad8d7eed5eb05d290ebc7c90f44c37d024e9084
|
[] |
no_license
|
isawnyu/pleiades-bulkup
|
c21f75b911fbc1864376647b7e56bccef12d87e5
|
9e70382d2f4abd057c9385228796c4cc2f7b931b
|
refs/heads/master
| 2021-01-10T10:24:32.033323
| 2016-04-06T07:26:38
| 2016-04-06T07:26:38
| 51,455,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 810
|
py
|
from AccessControl.SecurityManagement import newSecurityManager
from Products.CMFCore.utils import getToolByName
from Products.CMFUid.interfaces import IUniqueIdGenerator, \
IUniqueIdAnnotationManagement, IUniqueIdHandler
from zope.component import provideUtility
def secure(context, username):
    # Switch the active Zope security context to the given portal member so
    # subsequent operations run with that user's permissions.
    membership = getToolByName(context, 'portal_membership')
    user=membership.getMemberById(username).getUser()
    newSecurityManager(None, user.__of__(context.acl_users))
def setup_cmfuid(context):
    # Register the portal's CMFUid tools as global ZCA utilities so unique-id
    # generation, annotation and handling work outside normal site bootstrap.
    provideUtility(
        getToolByName(context, 'portal_uidgenerator'), IUniqueIdGenerator)
    provideUtility(
        getToolByName(context, 'portal_uidannotation'),
        IUniqueIdAnnotationManagement)
    provideUtility(
        getToolByName(context, 'portal_uidhandler'), IUniqueIdHandler)
|
[
"sean.gillies@gmail.com"
] |
sean.gillies@gmail.com
|
bfcdcf7f283e5a3ddfe38976aabedc3679d0df3a
|
ed7cd7760c708720f5a847a02b0c3a50cca0175e
|
/examples/placeholder.py
|
a61739b86f87b4f7733e7bb9f6ea01bac1fb0fd0
|
[
"MIT"
] |
permissive
|
jcapriot/aurora
|
bf98b1236e7dc43e0189df71725f7f862d271984
|
08d5ccc671054a2b646a4effb412a2ed48314646
|
refs/heads/main
| 2023-09-05T00:07:16.984109
| 2021-10-27T02:49:41
| 2021-10-27T02:49:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 589
|
py
|
"""
Placeholder for example
===========================

This example is a placeholder that uses the sphinx-gallery syntax
for creating examples
"""
import aurora  # imported so the gallery example exercises the package; unused below
import numpy as np
import matplotlib.pyplot as plt

###############################################################################
# Step 1
# ------
# Some description of what we are doing in step one

# 100 evenly spaced samples over two full periods of sin(x).
x = np.linspace(0, 4*np.pi, 100)

# take the sin(x)
y = np.sin(x)

###############################################################################
# Step 2
# ------
# Plot it

fig, ax = plt.subplots(1, 1)
ax.plot(x, y)
|
[
"lindseyheagy@gmail.com"
] |
lindseyheagy@gmail.com
|
fd6330691ccb8216b80605439df638507cbcadea
|
c92f43835821d8df2b93dfd781f890e56891f849
|
/Python3/136. Single Number.py
|
08c551b8c58bbccbf71e1502468350056064ee29
|
[] |
no_license
|
iamdoublewei/Leetcode
|
f4ae87ed8c31537098790842a72cafa5747d8588
|
e36f343aab109b051a9c3a96956c50b5580c7c15
|
refs/heads/master
| 2022-11-06T01:31:56.181800
| 2022-11-04T20:07:35
| 2022-11-04T20:07:35
| 71,944,123
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 611
|
py
|
'''
Given a non-empty array of integers, every element appears twice except for one. Find that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it without using extra memory?
Example 1:
Input: [2,2,1]
Output: 1
Example 2:
Input: [4,1,2,1,2]
Output: 4
'''
class Solution:
    def singleNumber(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        XOR every element together: pairs cancel (x ^ x == 0, x ^ 0 == x),
        so the accumulator ends up holding the unique element.  This meets
        the problem's stated constraints (linear time, no extra memory),
        unlike the previous sort-based version, which was O(n log n) and
        also mutated the caller's list in place.
        """
        acc = 0
        for v in nums:
            acc ^= v
        return acc
|
[
"iamdoublewei@gmail.com"
] |
iamdoublewei@gmail.com
|
3431b32a3dfd191d918b9c47842bad2a65b9ff7d
|
ecfa863dd3c5826c1df82df92860439616f4b0f4
|
/常见算法.py
|
2eb7fff127951601364d6b0d4e870272a7e4c1e5
|
[
"MIT"
] |
permissive
|
TinlokLee/Algorithm
|
92affce8f20f692b8f1784dbc8d995dec58ede61
|
2230377222209c99929b2e7430798be420c73420
|
refs/heads/master
| 2020-04-11T23:38:49.122767
| 2018-12-17T18:36:36
| 2018-12-17T18:36:36
| 162,174,065
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,523
|
py
|
# Bubble sort
def bubble_sort(arr):
    """Sort arr in place (ascending) by bubble sort and return it.

    Adds the standard early-exit: if a full pass performs no swap the list
    is already sorted and the remaining passes are skipped.
    """
    for i in range(len(arr) - 1):
        swapped = False
        for j in range(len(arr) - i - 1):
            if arr[j] > arr[j+1]:
                arr[j], arr[j+1] = arr[j+1], arr[j]
                swapped = True
        if not swapped:
            break  # no swaps this pass: already sorted
    return arr
'''
冒泡排序:
重复遍历待排序列,一次比较两个相邻元素(A-Z0-9)按升序排,直到没有需要交换的元素
时间复制度 O(n)
'''
# Selection sort
def selection_sort(nd):
    """In-place ascending selection sort; returns None like list.sort()."""
    length = len(nd)
    for pos in range(length - 1):
        # Index of the smallest remaining element (first one on ties,
        # matching a strict-< scan).
        smallest = min(range(pos, length), key=nd.__getitem__)
        if smallest != pos:
            nd[pos], nd[smallest] = nd[smallest], nd[pos]
'''
选择排序:
待排序列取最大(小)元素,放在序列起始位置,再从序列中选最值,放末尾,依此类推
数据移动排序
时间复制度O(n2)
'''
# Insertion sort
def insert_sort(nd):
    """In-place ascending insertion sort; returns None."""
    for right in range(1, len(nd)):
        pos = right
        # Swap the new element leftwards until the sorted prefix absorbs it.
        while pos > 0 and nd[pos] < nd[pos - 1]:
            nd[pos], nd[pos - 1] = nd[pos - 1], nd[pos]
            pos -= 1
'''
插入排序:
有序序列,从后向前扫描,找到相应位置并插入
时间复制度O(n)
'''
# Quick sort
def quick_sort(nd):
    """Return a new ascending-sorted list of nd; nd itself is not modified.

    Fixes in this revision:
      * empty input now returns [] (it used to return None, which crashed
        the recursive concatenation with a TypeError),
      * single-element input no longer raises NameError (the dead ``for``
        loop wrapping the recursive calls left less/gerter unbound),
      * elements equal to the pivot are kept (both partitions previously
        used strict comparisons, silently dropping duplicates).
    """
    if len(nd) <= 1:
        return list(nd)
    pivot = nd[0]
    smaller = quick_sort([x for x in nd[1:] if x < pivot])
    larger = quick_sort([x for x in nd[1:] if x >= pivot])
    return smaller + [pivot] + larger
|
[
"noreply@github.com"
] |
TinlokLee.noreply@github.com
|
3a6b3e8740f8ae94a19ceb717f7a32d089c78bf0
|
ec85250addb7357dfe7bb3e0680d53fc7b0fd8fb
|
/examples/docs_snippets/docs_snippets/integrations/dagstermill/notebook_outputs.py
|
defefa97d47cec4e5bcd41f8d4299e2f663056b1
|
[
"Apache-2.0"
] |
permissive
|
dagster-io/dagster
|
6adb5deee8bcf3ea1866a6a64f2ed81e1db5e73a
|
fe21995e0402878437a828c6a4244025eac8c43b
|
refs/heads/master
| 2023-09-05T20:46:08.203794
| 2023-09-05T19:54:52
| 2023-09-05T19:54:52
| 131,619,646
| 8,565
| 1,154
|
Apache-2.0
| 2023-09-14T21:57:37
| 2018-04-30T16:30:04
|
Python
|
UTF-8
|
Python
| false
| false
| 751
|
py
|
# start_notebook
# my_notebook.ipynb
import dagstermill
dagstermill.yield_result(3, output_name="my_output")
# end_notebook
# start_py_file
from dagstermill import ConfigurableLocalOutputNotebookIOManager, define_dagstermill_op
from dagster import Out, file_relative_path, job, op
# Wrap the notebook as a Dagster op: it exposes the notebook's yielded
# "my_output" value as a typed int output plus the executed-notebook handle.
my_notebook_op = define_dagstermill_op(
    name="my_notebook",
    # Path resolved relative to this file so the job runs from any CWD.
    notebook_path=file_relative_path(__file__, "./notebooks/my_notebook.ipynb"),
    output_notebook_name="output_notebook",
    outs={"my_output": Out(int)},
)
@op
def add_two(x):
    """Op that returns its input plus two."""
    return x + 2
@job(
    resource_defs={
        "output_notebook_io_manager": ConfigurableLocalOutputNotebookIOManager(),
    }
)
def my_job():
    # Run the notebook op (discarding the output-notebook handle) and feed
    # its "my_output" value into add_two.
    three, _ = my_notebook_op()
    add_two(three)
# end_py_file
|
[
"noreply@github.com"
] |
dagster-io.noreply@github.com
|
a7ca652897c1a319681cfd0644e3acbbff503f72
|
f6ff58f0bcc22731f246de979bc4ff98216b7fa9
|
/FriendsListHolder.py
|
a1914e7749cc895bca8be04d4c65fcc90cc11e63
|
[] |
no_license
|
stephenhky/fb_analysis
|
80e0492a35ea265e1c399ba0039b278513c5edb3
|
93f4ca590b57812f8cb124c1eeb7946cbf24d96b
|
refs/heads/master
| 2020-05-18T04:42:59.069845
| 2013-09-06T20:21:21
| 2013-09-06T20:21:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 399
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 25 17:35:46 2013
@author: hok1
"""
import fbtools
class FriendListHolder:
    """Caches a user's Facebook friend UID list and answers membership queries."""

    def __init__(self, selfuid, access_token):
        self.selfuid = selfuid
        self.access_token = access_token
        # One-time fetch; later checks are answered from this cached list.
        self.friend_uids = fbtools.getFriendUIDList(selfuid, access_token)

    def check_iffriend(self, uid):
        """Return True iff uid appears in the cached friend list."""
        return uid in self.friend_uids
|
[
"stephenhky@yahoo.com.hk"
] |
stephenhky@yahoo.com.hk
|
d161ba21b9554ec5054d115609033279790f3297
|
844e0cd4ffbe1ead05b844508276f66cc20953d5
|
/test/utilityfortest.py
|
a2a80aae6eaf0ece9c2d9766dd6d227194034b68
|
[] |
no_license
|
Archanciel/cryptopricer
|
a256fa793bb1f2d65b5c032dd81a266ee5be79cc
|
00c0911fe1c25c1da635dbc9b26d45be608f0cc5
|
refs/heads/master
| 2022-06-29T13:13:22.435670
| 2022-05-11T20:37:43
| 2022-05-11T20:37:43
| 100,196,449
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,271
|
py
|
import os,sys,inspect
import re
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from datetimeutil import DateTimeUtil
class UtilityForTest:
'''
This class contains static utility methods used by some unit test classes. It avoids code duplication.
'''
    @staticmethod
    def getFormattedDateTimeComponentsForArrowDateTimeObj(dateTimeObj):
        '''
        Return dateTimeObjYearStr, dateTimeObjMonthStr, dateTimeObjDayStr, dateTimeObjHourStr,
        dateTimeObjMinuteStr corresponding to the passed Arrow date object

        :param dateTimeObj: passed Arrow date object
        :return: tuple of five zero-padded strings (YY, MM, DD, HH, mm)
        '''
        dateTimeObjDayStr = dateTimeObj.format('DD')
        dateTimeObjMonthStr = dateTimeObj.format('MM')
        dateTimeObjYearStr = dateTimeObj.format('YY')
        dateTimeObjHourStr = dateTimeObj.format('HH')
        dateTimeObjMinuteStr = dateTimeObj.format('mm')

        return dateTimeObjYearStr, dateTimeObjMonthStr, dateTimeObjDayStr, dateTimeObjHourStr, dateTimeObjMinuteStr
@staticmethod
def removeOneEndPriceFromResult(resultStr):
'''
Used to remove unique price from RT request results or variable date/time price request results
:param resultStr:
:return:
'''
patternNoWarning = r"(.*) ([\d\.]*)"
patternOneWarning = r"(.*) ([\d\.]*)(\n.*)" #in raw string, \ must not be escaped (\\n not working !)
match = re.match(patternOneWarning, resultStr)
if (match):
if len(match.groups()) == 3:
# here, resultStr contains a warning like in
# BTC/USD on CCCAGG: 30/01/18 01:51R 11248.28\nWarning - unsupported command -ebitfinex in request btc usd 0 all -ebitfinex !
return match.group(1) + match.group(3)
match = re.match(patternNoWarning, resultStr)
if (match):
if len(match.groups()) == 2:
# the case for resultStr containing BTC/USD on CCCAGG: 30/01/18 01:49R 11243.72 for example !
return match.group(1)
return ()
@staticmethod
def removeTwoEndPricesFromResult(resultStr):
'''
Used to remove two prices from RT request results with -f (fiat) option or variable date/time price request
results with -f (fiat) option
:param resultStr:
:return:
'''
patternNoWarning = r"(.*) (?:[\d\.]*) (?:[\d\.]*)"
patternOneWarning = r"(.*) (?:[\d\.]*) (?:[\d\.]*)(\n.*)" #in raw string, \ must not be escaped (\\n not working !)
match = re.match(patternOneWarning, resultStr)
if (match):
if len(match.groups()) == 2:
# here, resultStr contains a warning like in
# BTC/USD on CCCAGG: 30/01/18 01:51R 11248.28\nWarning - unsupported command -ebitfinex in request btc usd 0 all -ebitfinex !
return match.group(1) + match.group(2)
match = re.match(patternNoWarning, resultStr)
if (match):
if len(match.groups()) == 1:
# the case for resultStr containing BTC/USD on CCCAGG: 30/01/18 01:49R 11243.72 for example !
return match.group(1)
return ()
@staticmethod
def removeAllPricesFromCommandValueResult(resultStr):
'''
Used to remove multiple prices from RT request results or variable date/time price request results
:param resultStr:
:return:
'''
# pattern for this kind of output result:
# 0.06180355 ETH/100 USD on AVG: 03/03/21 20:03M 1618.03 which correspond to request
# eth usd 0 binance -v100usd
patternNoWarningValueOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (.*) (?:[\d\.]*)"
patternOneWarningValueOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (.*) (?:[\d\.]*(\n.*))"
# pattern for this kind of output result:
# 60851.6949266 CHSB/1.46530881 BTC/1000 USD.Kraken on HitBTC: 06/03/21 20:00R 0.00002408 0.0164334 which correspond to request
# chsb btc 0 hitbtc -v1000usd -fusd.kraken
patternNoWarningValueAndFiatOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (\w*/)(?:[\d\.]* )(.*) (?:[\d\.]*) (?:[\d\.]*)"
patternOneWarningValueAndFiatOption = r"(?:[\d\.]*) (\w*/)(?:[\d\.]*) (\w*/)(?:[\d\.]* )(.*) (?:[\d\.]*) (?:[\d\.]*(\n.*))"
# pattern for this kind of output result:
# CHSB/BTC/USD.Kraken on HitBTC: 06/03/21 20:00R 0.00002408 0.0164334 which correspond to request
# chsb btc 0 hitbtc -fusd.kraken
patternNoWarningFiatOption = '(.*) (?:[\d\.]*) (?:[\d\.]*)'
patternOneWarningFiatOption = '(.*) (?:[\d\.]*) (?:[\d\.]*(\n.*))'
match = re.match(patternOneWarningValueAndFiatOption, resultStr)
if match != None and len(match.groups()) == 4:
return match.group(1) + match.group(2) + match.group(3) + match.group(4)
match = re.match(patternNoWarningValueAndFiatOption, resultStr)
if match != None and len(match.groups()) == 3:
return match.group(1) + match.group(2) + match.group(3)
match = re.match(patternOneWarningValueOption, resultStr)
if match != None and len(match.groups()) == 3:
return match.group(1) + match.group(2) + match.group(3)
match = re.match(patternNoWarningValueOption, resultStr)
if match != None and len(match.groups()) == 2:
return match.group(1) + match.group(2)
match = re.match(patternOneWarningFiatOption, resultStr)
if match != None and len(match.groups()) == 2:
return match.group(1) + match.group(2)
match = re.match(patternNoWarningFiatOption, resultStr)
if match != None and len(match.groups()) == 1:
return match.group(1)
else:
return ()
@staticmethod
def extractDateTimeStr(resultStr):
dateTimePattern = r"(\d*/\d*/\d* \d*:\d*)"
s = re.search(dateTimePattern, resultStr)
if s != None:
if len(s.groups()) == 1:
group = s.group(1)
return group
@staticmethod
def doAssertAcceptingOneMinuteDateTimeDifference(unitTest,
nowDayStr,
nowHourStr,
nowMinuteStr,
nowMonthStr,
nowYearStr,
requestResultNoEndPrice,
expectedPrintResultNoDateTimeNoEndPrice):
"""
This method verifies that the passed real time request result requestResultNoEndPrice
date/time value correspond to now +/- 60 seconds. The purpose is to avoid a test
failure due to the fact that the crypto price provider was requested at, say,
11:54:59 (now value) and returns a result with time 11:55.
:param unitTest:
:param nowDayStr:
:param nowHourStr:
:param nowMinuteStr:
:param nowMonthStr:
:param nowYearStr:
:param requestResultNoEndPrice:
:param expectedPrintResultNoDateTimeNoEndPrice:
:return:
"""
actualDateTimeStr = UtilityForTest.extractDateTimeStr(requestResultNoEndPrice)
expectedDateTimeStr = '{}/{}/{} {}:{}'.format(nowDayStr, nowMonthStr, nowYearStr, nowHourStr,
nowMinuteStr)
actualDateTimeStamp = DateTimeUtil.dateTimeStringToTimeStamp(actualDateTimeStr, 'Europe/Zurich',
'DD/MM/YY HH:mm')
expectedDateTimeStamp = DateTimeUtil.dateTimeStringToTimeStamp(expectedDateTimeStr, 'Europe/Zurich',
'DD/MM/YY HH:mm')
unitTest.assertAlmostEqual(actualDateTimeStamp, expectedDateTimeStamp, delta=60)
unitTest.assertEqual(expectedPrintResultNoDateTimeNoEndPrice,
requestResultNoEndPrice.replace(actualDateTimeStr, ''))
if __name__ == '__main__':
now = DateTimeUtil.localNow('Europe/Zurich')
nowMonthStr, nowDayStr, nowHourStr, nowMinuteStr = UtilityForTest.getFormattedDateTimeComponentsForArrowDateTimeObj(now)
print("{}/{} {}:{}".format(nowDayStr, nowMonthStr, nowHourStr, nowMinuteStr))
|
[
"jp.schnyder@gmail.com"
] |
jp.schnyder@gmail.com
|
afb205b56da7ad95a6986ebaf0243e43e79cb058
|
cc2f611a2d837cb81dd6957253388c683c849b0b
|
/Problems/mergeSort.py
|
fbe6e1c4228d7a5291e444bd9366d8941e2dc22b
|
[] |
no_license
|
tylors1/Leetcode
|
3474dec224a5376f0c360d3fce9fb8030fe17539
|
e78129616468fa02abc1850ad7e7b26ddbdec871
|
refs/heads/master
| 2021-04-06T01:38:35.337711
| 2018-07-25T00:26:36
| 2018-07-25T00:26:36
| 125,107,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 414
|
py
|
def merge(A,start,mid,end):
L = A[start:mid]
R = A[mid:end]
j = i = 0
k = start
for l in range(k,end):
if j >= len(R) or (i < len(L) and L[i] < R[j]):
A[l] = L[i]
i = i + 1
else:
A[l] = R[j]
j = j + 1
def mergeSort(A,p,r):
if r - p > 1:
mid = int((p+r)/2)
mergeSort(A,p,mid)
mergeSort(A,mid,r)
merge(A,p,mid,r)
A = [20, 30, 21, 15, 42, 45, 31, 0, 9]
mergeSort(A,0,len(A))
print A
|
[
"tylors1@gmail.com"
] |
tylors1@gmail.com
|
715948755c322ba64467c7fcb1b7473bdd0c5ce0
|
8ad5ab7236dcb6717b56b4a494eac3fcc08d2c62
|
/redap/services/__init__.py
|
ef310db23b459f0521c2d1728dcbf59e02b3e05d
|
[
"MIT"
] |
permissive
|
bgedney/redap
|
2335664416660f6420b5ed8c10f2cbff5a8a86d8
|
34d6338c4d8fe330ce3a40d2a694456964f1496d
|
refs/heads/master
| 2020-12-31T21:45:52.353173
| 2018-07-17T09:50:07
| 2018-07-17T09:50:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
# -*- coding: utf-8 -*-
from .user import UserService
from .group import GroupService
users = UserService()
groups = GroupService()
|
[
"rbw@vault13.org"
] |
rbw@vault13.org
|
d4e92c7f4952c0665079f06b03c4e86bb6cc9cfa
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_55/168.py
|
bd9a92e7f38786c5038cbe13c574cf7d59e4685b
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 498
|
py
|
f = open('1.in','r')
o = open('out.dat','w')
n = int(f.readline())
for i in xrange(n):
R,k,N = f.readline().split()
R = int(R)
k = int(k)
N = int(N)
g = f.readline().split()
g = [int(g[j]) for j in xrange(N)]
y = 0
p = 0
for j in xrange(R):
m = 0
p0 = p
while (m+g[p]) <= k:
y += g[p]
m+=g[p]
if (p+1)<N:
p +=1
else:
p=0
if p == p0:
break
o.write('Case #' + str(i+1) + ': '+ str(y) +'\n')
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
7d85d2c7ce76018934193471af9c073a3e71d51b
|
be8d0f0aadcac53f90a34716153fe56ed1d44b11
|
/edx/app/edxapp/venvs/edxapp/lib/python2.7/site-packages/consent/migrations/0001_initial.py
|
a351521d395faef7c07ec0f37a0b147a1a1c8366
|
[] |
no_license
|
AlaaSwedan/edx
|
5353e6afa7c75d63b6c28150b6ef54180d3ddc84
|
73fec97eb2850e67e5f57e391641116465424d88
|
refs/heads/master
| 2021-09-01T14:53:48.342510
| 2017-12-27T09:46:39
| 2017-12-27T09:46:39
| 115,434,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,752
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import consent.mixins
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('enterprise', '0024_enterprisecustomercatalog_historicalenterprisecustomercatalog'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='DataSharingConsent',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(help_text='Name of the user whose consent state is stored.', max_length=255)),
('granted', models.NullBooleanField(help_text='Whether consent is granted.')),
('course_id', models.CharField(help_text='Course key for which data sharing consent is granted.', max_length=255)),
('enterprise_customer', models.ForeignKey(related_name='enterprise_customer_consent', to='enterprise.EnterpriseCustomer', on_delete=django.db.models.deletion.CASCADE)),
],
options={
'abstract': False,
'verbose_name': 'Data Sharing Consent Record',
'verbose_name_plural': 'Data Sharing Consent Records',
},
bases=(consent.mixins.ConsentModelMixin, models.Model),
),
migrations.CreateModel(
name='HistoricalDataSharingConsent',
fields=[
('id', models.IntegerField(verbose_name='ID', db_index=True, auto_created=True, blank=True)),
('created', model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, verbose_name='created', editable=False)),
('modified', model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, verbose_name='modified', editable=False)),
('username', models.CharField(help_text='Name of the user whose consent state is stored.', max_length=255)),
('granted', models.NullBooleanField(help_text='Whether consent is granted.')),
('course_id', models.CharField(help_text='Course key for which data sharing consent is granted.', max_length=255)),
('history_id', models.AutoField(serialize=False, primary_key=True)),
('history_date', models.DateTimeField()),
('history_type', models.CharField(max_length=1, choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')])),
('enterprise_customer', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.DO_NOTHING, db_constraint=False, blank=True, to='enterprise.EnterpriseCustomer', null=True)),
('history_user', models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True)),
],
options={
'ordering': ('-history_date', '-history_id'),
'get_latest_by': 'history_date',
'verbose_name': 'historical Data Sharing Consent Record',
},
),
migrations.AlterUniqueTogether(
name='datasharingconsent',
unique_together=set([('enterprise_customer', 'username', 'course_id')]),
),
]
|
[
"root@tatweer02.cxmgqbjhlc0u3gsvisa212wuxe.fx.internal.cloudapp.net"
] |
root@tatweer02.cxmgqbjhlc0u3gsvisa212wuxe.fx.internal.cloudapp.net
|
705b4a858fa80c737536a7f601d2cb7a67ae2372
|
f5a53f0f2770e4d7b3fdace83486452ddcc996e1
|
/netbox/tenancy/api/urls.py
|
5762f9a0d52cea35a2c7ded2b71d8c314c3c4906
|
[
"Apache-2.0"
] |
permissive
|
fireman0865/PingBox
|
35e8fc9966b51320d571b63967e352a134022128
|
0f00eaf88b88e9441fffd5173a1501e56c13db03
|
refs/heads/master
| 2023-01-20T07:55:59.433046
| 2020-03-15T13:36:31
| 2020-03-15T13:36:31
| 247,466,832
| 1
| 0
|
Apache-2.0
| 2022-12-26T21:30:32
| 2020-03-15T12:59:16
|
Python
|
UTF-8
|
Python
| false
| false
| 544
|
py
|
from rest_framework import routers
from . import views
class TenancyRootView(routers.APIRootView):
"""
Tenancy API root view
"""
def get_view_name(self):
return 'Tenancy'
router = routers.DefaultRouter()
router.APIRootView = TenancyRootView
# Field choices
router.register('_choices', views.TenancyFieldChoicesViewSet, basename='field-choice')
# Tenants
router.register('tenant-groups', views.TenantGroupViewSet)
router.register('tenants', views.TenantViewSet)
app_name = 'tenancy-api'
urlpatterns = router.urls
|
[
"fireman0865@gmail.com"
] |
fireman0865@gmail.com
|
1c105c840d3ca23d00e5560635f2901d287b81b1
|
1dbb9ae42a82a854a0fba3eb8e4d0482e0a08a44
|
/util/constraint_applyer.py
|
66ef43d9a3a40e8244eedd91f4c448cdedb00fe1
|
[] |
no_license
|
roderiklagerweij/PyThings
|
1de9b7d13ed6dc999f958493f23dac053c2edcd5
|
1f565dd1a9431ff18de0b3d260d32e0d84874cb6
|
refs/heads/master
| 2021-08-24T00:53:20.949058
| 2017-10-25T12:26:33
| 2017-10-25T12:26:33
| 106,086,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,173
|
py
|
import settings
from util import view_finder
__author__ = 'Roderik'
def apply_width_constraint(parent, id_list):
views = []
for id in id_list:
views.extend(view_finder.find_views_with_id(id, parent))
for i in range(10):
parent.measure()
parent.post_measure(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
parent.apply_gravity(0, 0, settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
max_width = 0
for view in views:
if view.width > max_width:
max_width = view.width
for view in views:
view.width = max_width
def apply_height_constraint(parent, id_list):
views = []
for id in id_list:
views.extend(view_finder.find_views_with_id(id, parent))
for i in range(10):
parent.measure()
parent.post_measure(settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
parent.apply_gravity(0, 0, settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT)
max_height = 0
for view in views:
if view.height > max_height:
max_height = view.height
for view in views:
view.height = max_height
|
[
"="
] |
=
|
8106d055672db8dfdbc2cbdec865e90f66edfc0c
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/PearRay/core/render.py
|
bf8e13532a626d16cc4b1257bd976588d9231315
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,851
|
py
|
import bpy
import os
import sys
import time
import subprocess
import threading
import re
import importlib
import numpy as np
from collections import deque
from .. import export
pearray_package = __import__(__name__.split('.')[0])
class PearRayRender(bpy.types.RenderEngine):
bl_idname = 'PEARRAY_RENDER'
bl_label = "PearRay"
#bl_use_preview = True
bl_use_exclude_layers = True
@staticmethod
def _setup_package():
addon_prefs = bpy.context.user_preferences.addons[pearray_package.__package__].preferences
if addon_prefs.package_dir:
sys.path.append(bpy.path.resolve_ncase(bpy.path.abspath(addon_prefs.package_dir)))
return importlib.import_module("pypearray")
def _proc_wait(self, renderer):
time.sleep(0.25)
# User interrupts the rendering
if self.test_break():
try:
renderer.stop()
print("<<< PEARRAY INTERRUPTED >>>")
except OSError:
pass
return False
if renderer.finished:
return False
return True
def _handle_render_stat(self, renderer):
stat = renderer.status
line = "Pass %s S %i R %i EH %i BH %i" % (renderer.currentPass+1,
stat['global.pixel_sample_count'],
stat['global.ray_count'],
stat['global.entity_hit_count'],
stat['global.background_hit_count'])
self.update_stats("", "PearRay: Rendering [%s]..." % (line))
self.update_progress(stat.percentage)
def render(self, scene):
addon_prefs = bpy.context.user_preferences.addons[pearray_package.__package__].preferences
pr = PearRayRender._setup_package()
pr.Logger.instance.verbosity = pr.LogLevel.DEBUG if addon_prefs.verbose else pr.LogLevel.INFO
specDesc = pr.SpectrumDescriptor.createStandardSpectral()
import tempfile
render = scene.render
x = int(render.resolution_x * render.resolution_percentage * 0.01)
y = int(render.resolution_y * render.resolution_percentage * 0.01)
print("<<< START PEARRAY >>>")
blendSceneName = bpy.data.filepath.split(os.path.sep)[-1].split(".")[0]
if not blendSceneName:
blendSceneName = "blender_scene"
sceneFile = ""
renderPath = ""
# has to be called to update the frame on exporting animations
scene.frame_set(scene.frame_current)
renderPath = bpy.path.resolve_ncase(bpy.path.abspath(render.filepath))
if not render.filepath:
renderPath = tempfile.gettempdir()
if scene.pearray.keep_prc:
sceneFile = os.path.normpath(renderPath + "/scene.prc")
else:
sceneFile = tempfile.NamedTemporaryFile(suffix=".prc").name
self.update_stats("", "PearRay: Exporting data")
scene_exporter = export.Exporter(sceneFile, scene)
scene_exporter.write_scene(pr)
self.update_stats("", "PearRay: Starting render")
environment = pr.SceneLoader.loadFromFile(sceneFile)
toneMapper = pr.ToneMapper(x, y)
toneMapper.colorMode = pr.ToneColorMode.SRGB
toneMapper.gammaMode = pr.ToneGammaMode.NONE
toneMapper.mapperMode = pr.ToneMapperMode.NONE
colorBuffer = pr.ColorBuffer(x,y,pr.ColorBufferMode.RGBA)
environment.registry.set('/renderer/film/width', x)
environment.registry.set('/renderer/film/height', y)
if addon_prefs.verbose:
print("Registry:")
print(environment.registry.dump())
pr_scene = environment.sceneFactory.create()
if not pr_scene:
self.report({'ERROR'}, "PearRay: could not create pearray scene instance")
print("<<< PEARRAY FAILED >>>")
return
factory = pr.RenderFactory(specDesc, pr_scene, environment.registry, renderPath)
addon_prefs = bpy.context.user_preferences.addons[pearray_package.__package__].preferences
renderer = factory.create()
if not renderer:
self.report({'ERROR'}, "PearRay: could not create pearray render instance")
print("<<< PEARRAY FAILED >>>")
return
environment.setup(renderer)
if not os.path.exists(renderPath):
os.makedirs(renderPath)
threads = 0
if scene.render.threads_mode == 'FIXED':
threads = scene.render.threads
renderer.start(scene.render.tile_x, scene.render.tile_y, threads)
# Update image
result = self.begin_result(0, 0, x, y)
layer = result.layers[0]
def update_image():
colorBuffer.map(toneMapper, renderer.output.spectral)
arr = np.array(colorBuffer, copy=False)
arr = np.reshape(np.flip(arr,0), (x*y,4), 'C')
layer.passes["Combined"].rect = arr
self.update_result(result)
update_image()
prog_start = time.time()
img_start = time.time()
while self._proc_wait(renderer):
prog_end = time.time()
if addon_prefs.show_progress_interval < (prog_end - prog_start):
self._handle_render_stat(renderer)
prog_start = prog_end
if addon_prefs.show_image_interval > 0:
img_end = time.time()
if addon_prefs.show_image_interval < (img_end - img_start):
update_image()
img_start = img_end
update_image()
self.end_result(result)
environment.save(renderer, toneMapper, True)
if not scene.pearray.keep_prc:
os.remove(sceneFile)
self.update_stats("", "")
print("<<< PEARRAY FINISHED >>>")
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
df8527c28632b605a3e07d6553228145bf09d138
|
4bb6a8cbd7ac887ec4abc6abc97f0cb17415e82d
|
/Chapter 10 Class/die3.py
|
feecac569c11d71ca755666f71d074ecfcb2c079
|
[] |
no_license
|
jbhennes/CSCI-220-Programming-1
|
cdc9cab47b4a79dccabf014224a175674e9a7155
|
ac9e85582eeb51a205981674ffdebe8a5b93a205
|
refs/heads/master
| 2021-01-01T03:54:50.723923
| 2016-05-02T16:06:55
| 2016-05-02T16:06:55
| 57,902,553
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 970
|
py
|
# die3.py
# Class that defines a six-sided die object.
##from random import randint
class Die:
def __init__(self):
self.faceValue = 1
def getFaceValue(self):
return self.faceValue
def setFaceValue(self, value):
if value > 0 and value < 7:
self.faceValue = value
else:
print "Don't try to cheat!"
def roll(self):
value = randint(1,6)
self.faceValue = value
def __str__(self):
return "Die Value: " + str(self.faceValue)
##
## def roll(self):
## self.faceValue = randrange(1, 7)
##
## def getValue(self):
## return self.faceValue
##
## def setValue(self, value):
## if value >= 1 and value <= 6:
## self.faceValue = int(value)
## else:
## self.faceValue = randrange(1, 7)
##
## def __str__(self):
## return "The die's value is: " + str(self.faceValue)
|
[
"jbhennes@g.cofc.edu"
] |
jbhennes@g.cofc.edu
|
6fc3612754da59a46ae1c9ecb026451637ba5f7e
|
64d7d065c92ba5a4a7f3aab1eabeb786c7864ad0
|
/profiles_api/migrations/0002_profilefeeditem.py
|
72eefdaa9399c90de5c7811a3f54cb5df3e9e16e
|
[
"MIT"
] |
permissive
|
alardosa/profiles-rest-api
|
1bfa20fbf442ce7487a07b7a531ce84436007326
|
d7ee8606b9d0c16ea7b4d8d43ca22f94658c6ee7
|
refs/heads/master
| 2021-09-27T06:49:37.161651
| 2020-02-20T06:14:56
| 2020-02-20T06:14:56
| 238,153,303
| 1
| 0
|
MIT
| 2021-09-22T18:32:03
| 2020-02-04T08:01:23
|
Python
|
UTF-8
|
Python
| false
| false
| 790
|
py
|
# Generated by Django 3.0.3 on 2020-02-12 01:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='ProfileFeedItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('status_text', models.CharField(max_length=255)),
('created_on', models.DateTimeField(auto_now_add=True)),
('user_profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
[
"alardosa@gmail.com"
] |
alardosa@gmail.com
|
8a8be3e81dcfb6ac9e3a2c296b49136138a80d34
|
ba0cbdae81c171bd4be7b12c0594de72bd6d625a
|
/MyToontown/py2/toontown/coghq/BattleBlocker.pyc.py
|
ca6c57e799dcef409f253ece85f07fca491e4c22
|
[] |
no_license
|
sweep41/Toontown-2016
|
65985f198fa32a832e762fa9c59e59606d6a40a3
|
7732fb2c27001264e6dd652c057b3dc41f9c8a7d
|
refs/heads/master
| 2021-01-23T16:04:45.264205
| 2017-06-04T02:47:34
| 2017-06-04T02:47:34
| 93,279,679
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,757
|
py
|
# 2013.08.22 22:18:02 Pacific Daylight Time
# Embedded file name: toontown.coghq.BattleBlocker
from pandac.PandaModules import *
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from otp.level import BasicEntities
from toontown.toonbase import ToontownGlobals
from direct.directnotify import DirectNotifyGlobal
class BattleBlocker(BasicEntities.DistributedNodePathEntity):
__module__ = __name__
notify = DirectNotifyGlobal.directNotify.newCategory('BattleBlocker')
def __init__(self, cr):
BasicEntities.DistributedNodePathEntity.__init__(self, cr)
self.suitIds = []
self.battleId = None
return
def setActive(self, active):
self.active = active
def announceGenerate(self):
BasicEntities.DistributedNodePathEntity.announceGenerate(self)
self.initCollisionGeom()
def disable(self):
self.ignoreAll()
self.unloadCollisionGeom()
BasicEntities.DistributedNodePathEntity.disable(self)
def destroy(self):
BasicEntities.DistributedNodePathEntity.destroy(self)
def setSuits(self, suitIds):
self.suitIds = suitIds
def setBattle(self, battleId):
self.battleId = battleId
def setBattleFinished(self):
self.ignoreAll()
def initCollisionGeom(self):
self.cSphere = CollisionSphere(0, 0, 0, self.radius)
self.cSphereNode = CollisionNode('battleBlocker-%s-%s' % (self.level.getLevelId(), self.entId))
self.cSphereNode.addSolid(self.cSphere)
self.cSphereNodePath = self.attachNewNode(self.cSphereNode)
self.cSphereNode.setCollideMask(ToontownGlobals.WallBitmask)
self.cSphere.setTangible(0)
self.enterEvent = 'enter' + self.cSphereNode.getName()
self.accept(self.enterEvent, self.__handleToonEnter)
def unloadCollisionGeom(self):
if hasattr(self, 'cSphereNodePath'):
self.ignore(self.enterEvent)
del self.cSphere
del self.cSphereNode
self.cSphereNodePath.removeNode()
del self.cSphereNodePath
def __handleToonEnter(self, collEntry):
self.notify.debug('__handleToonEnter, %s' % self.entId)
self.startBattle()
def startBattle(self):
if not self.active:
return
callback = None
if self.battleId != None and self.battleId in base.cr.doId2do:
battle = base.cr.doId2do.get(self.battleId)
if battle:
self.notify.debug('act like we collided with battle %d' % self.battleId)
callback = battle.handleBattleBlockerCollision
elif len(self.suitIds) > 0:
for suitId in self.suitIds:
suit = base.cr.doId2do.get(suitId)
if suit:
self.notify.debug('act like we collided with Suit %d ( in state %s )' % (suitId, suit.fsm.getCurrentState().getName()))
callback = suit.handleBattleBlockerCollision
break
self.showReaction(callback)
return
def showReaction(self, callback = None):
if not base.localAvatar.wantBattles:
return
track = Sequence()
if callback:
track.append(Func(callback))
track.start()
if __dev__:
def attribChanged(self, *args):
self.unloadCollisionGeom()
self.initCollisionGeom()
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\coghq\BattleBlocker.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:18:03 Pacific Daylight Time
|
[
"sweep14@gmail.com"
] |
sweep14@gmail.com
|
91903958737a641020ce96b9d7a046cac600a9f3
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03474/s109158950.py
|
f80c891d3a6c6b61b79f29390528b638d503cc9e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
a, b = map(int, input().split())
s = input()
if s[:a].isdigit() and s[a] =='-' and s[-b:].isdigit():
print('Yes')
else:
print('No')
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
75129cdcd47f48a1bd12a6212201012ffae573e7
|
e7fcc1d64cd95805918ab1b5786bf81a92f973ef
|
/2017/day22/test_day22.py
|
dce7c09a50212dd957967cad85f56d655a5738d5
|
[] |
no_license
|
trolen/advent-of-code
|
8145c1e36fea04e53d4b7a885efcc2da71fbfe57
|
0a4e022a6a810d86e044a15036a2f5778f0d38af
|
refs/heads/master
| 2023-02-26T13:11:58.341006
| 2023-02-20T23:22:27
| 2023-02-20T23:22:27
| 54,579,550
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
#! /usr/bin/env python3
import unittest
import day22
class TestDay22(unittest.TestCase):
def setUp(self):
data = [
'..#',
'#..',
'...'
]
self._cluster = day22.Cluster(data)
def test_part1(self):
self.assertEqual(5, self._cluster.run(7))
self.assertEqual(41, self._cluster.run(70))
self.assertEqual(5587, self._cluster.run(10000))
def test_part2(self):
self.assertEqual(26, self._cluster.run(100, part2=True))
self.assertEqual(2511944, self._cluster.run(10000000, part2=True))
if __name__ == '__main__':
unittest.main()
|
[
"timothy.rolen@gmail.com"
] |
timothy.rolen@gmail.com
|
e8754bac157825e839ab2223e27997427aa4e5ca
|
5831b0293cbb6f9e0660ac4ec952cbdb047d051d
|
/tests/test_dataset_wikipedia.py
|
0a969b520c5fc07dc8a4b52634061969907c9eae
|
[
"Apache-2.0"
] |
permissive
|
mdlynch37/textacy
|
03e3287fd8ee8bd4d06e48b7b87edf8324a987e5
|
c1c7376a84a62faeee496e9b8cc2a29edc28c7d1
|
refs/heads/master
| 2021-01-20T09:29:54.627035
| 2017-12-04T05:31:14
| 2017-12-04T05:31:14
| 101,596,726
| 0
| 0
| null | 2017-08-28T02:36:30
| 2017-08-28T02:36:30
| null |
UTF-8
|
Python
| false
| false
| 1,831
|
py
|
from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
import unittest
from textacy import data_dir
from textacy.compat import unicode_
from textacy.datasets.wikipedia import Wikipedia
DATASET = Wikipedia(lang='en', version='latest')
@unittest.skipUnless(
DATASET.filename, 'Wikipedia dataset must be downloaded before running tests')
class WikipediaTestCase(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp(
prefix='test_datasets_', dir=os.path.dirname(os.path.abspath(__file__)))
@unittest.skip("No need to download a new dataset every time")
def test_download(self):
dataset = Wikipedia(data_dir=self.tempdir)
dataset.download()
self.assertTrue(os.path.exists(dataset.filename))
def test_ioerror(self):
dataset = Wikipedia(data_dir=self.tempdir)
with self.assertRaises(IOError):
_ = list(dataset.texts())
def test_texts(self):
for text in DATASET.texts(limit=3):
self.assertIsInstance(text, unicode_)
def test_texts_limit(self):
for limit in (1, 5, 10):
self.assertEqual(sum(1 for _ in DATASET.texts(limit=limit)), limit)
def test_texts_min_len(self):
for min_len in (100, 200, 500):
self.assertTrue(
all(len(text) >= min_len
for text in DATASET.texts(min_len=min_len, limit=10)))
def test_records(self):
for record in DATASET.records(limit=3):
self.assertIsInstance(record, dict)
def test_records_fast(self):
for record in DATASET.records(limit=3, fast=True):
self.assertIsInstance(record, dict)
# TODO: test individual parsing functions
def tearDown(self):
shutil.rmtree(self.tempdir)
|
[
"burton@chartbeat.com"
] |
burton@chartbeat.com
|
d011cac7d86757a68f11dfebdb8dd76101cecde5
|
8114909d3ed6ee1e6d1fbe14a37723015ab53af6
|
/googleplus_test.py
|
3db6be74738968a4cf7a2c5828069a7657cb4865
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
notenoughneon/activitystreams-unofficial
|
b0c66d48eb3b43d68b76df069ba237dce9d77489
|
1f45bde45d3d18ef39d69ebd698e248233b94ce9
|
refs/heads/master
| 2021-01-18T03:01:03.101619
| 2014-08-15T15:00:00
| 2014-08-15T23:48:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,058
|
py
|
"""Unit tests for googleplus.py.
See apiclient/http.py for details on using RequestMockBuilder to mock out Google
API calls. (This is the current doc on apiclient mocks, but it doesn't mention
RequestMockBuilder:
https://developers.google.com/api-client-library/python/guide/mocks )
"""
__author__ = ['Ryan Barrett <activitystreams@ryanb.org>']
import copy
import json
import appengine_config
import httplib2
from apiclient import discovery
from apiclient import http
appengine_config.GOOGLE_CLIENT_ID = 'my client id'
appengine_config.GOOGLE_CLIENT_SECRET = 'my client secret'
import googleplus
from oauth_dropins import googleplus as oauth_googleplus
from oauth_dropins.webutil import testutil
from oauth_dropins.webutil import util
DISCOVERY_DOC = appengine_config.read('googleplus_api_discovery.json')
def tag_uri(name):
return util.tag_uri('plus.google.com', name)
ACTIVITY_GP = { # Google+
'kind': 'plus#activity',
'verb': 'post',
'id': '001',
'actor': {'id': '444', 'displayName': 'Charles'},
'object': {
'content': 'my post',
'url': 'http://plus.google.com/001',
},
}
ACTIVITY_AS = copy.deepcopy(ACTIVITY_GP) # ActivityStreams
ACTIVITY_AS['id'] = tag_uri('001')
ACTIVITY_AS['object']['author'] = ACTIVITY_GP['actor']
ACTIVITY_AS['object']['to'] = [{'objectType':'group', 'alias':'@public'}]
COMMENT_GP = { # Google+
'kind': 'plus#comment',
'verb': 'post',
'id': '888',
'actor': {'id': '777', 'displayName': 'Eve'},
'object': {'content': 'my content'},
'inReplyTo': [{'url': 'http://post/url'}],
}
COMMENT_AS = copy.deepcopy(COMMENT_GP)
COMMENT_AS.update({ # ActivityStreams
'author': COMMENT_AS.pop('actor'),
'displayName': 'my content',
'content': 'my content',
'id': tag_uri('888'),
'url': 'http://post/url',
'to': [{'objectType':'group', 'alias':'@public'}],
})
PLUSONER = { # Google+
'kind': 'plus#person',
'id': '222',
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
}
LIKE = { # ActivityStreams
'id': tag_uri('001_liked_by_222'),
'url': 'http://plus.google.com/001',
'objectType': 'activity',
'verb': 'like',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('222'),
'displayName': 'Alice',
'url': 'https://profiles.google.com/alice',
'image': {'url': 'https://alice/picture'},
},
'displayName': 'Alice +1ed this.',
'content': '+1ed this.',
}
RESHARER = { # Google+
'kind': 'plus#person',
'id': '444',
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
}
SHARE = { # ActivityStreams
'id': tag_uri('001_shared_by_444'),
'url': 'http://plus.google.com/001',
'objectType': 'activity',
'verb': 'share',
'object': {'url': 'http://plus.google.com/001'},
'author': {
'kind': 'plus#person',
'id': tag_uri('444'),
'displayName': 'Bob',
'url': 'https://plus.google.com/bob',
'image': {'url': 'https://bob/picture'},
},
'displayName': 'Bob reshared this.',
'content': 'reshared this.',
}
ACTIVITY_GP_EXTRAS = copy.deepcopy(ACTIVITY_GP) # Google+
ACTIVITY_GP_EXTRAS['object'].update({
'replies': {'totalItems': 1},
'plusoners': {'totalItems': 1},
'resharers': {'totalItems': 1},
})
ACTIVITY_AS_EXTRAS = copy.deepcopy(ACTIVITY_GP_EXTRAS) # ActivityStreams
ACTIVITY_AS_EXTRAS['id'] = tag_uri('001')
ACTIVITY_AS_EXTRAS['object'].update({
'author': ACTIVITY_GP_EXTRAS['actor'],
'to': [{'objectType':'group', 'alias':'@public'}],
'replies': {'totalItems': 1, 'items': [COMMENT_AS]},
'tags': [LIKE, SHARE],
})
class GooglePlusTest(testutil.HandlerTest):
def setUp(self):
super(GooglePlusTest, self).setUp()
self.auth_entity = oauth_googleplus.GooglePlusAuth(
id='my_string_id',
user_json=json.dumps({
'displayName': 'Bob',
}),
creds_json=json.dumps({
'access_token': 'my token',
'client_id': appengine_config.GOOGLE_CLIENT_ID,
'client_secret': appengine_config.GOOGLE_CLIENT_SECRET,
'refresh_token': 'my refresh token',
'token_expiry': '',
'token_uri': '',
'user_agent': '',
'invalid': '',
}))
self.googleplus = googleplus.GooglePlus(auth_entity=self.auth_entity)
def tearDown(self):
oauth_googleplus.json_service = None
def init(self, **kwargs):
"""Sets up the API service from googleplus_test_discovery.
Pass a requestBuilder or http kwarg to inject expected HTTP requests and
responses.
"""
oauth_googleplus.json_service = discovery.build_from_document(
DISCOVERY_DOC, **kwargs)
def test_get_comment(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.comments.get': (None, json.dumps(COMMENT_GP)) # None means 200 OK
}))
self.assert_equals(COMMENT_AS, self.googleplus.get_comment('234'))
def test_get_activity(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.get': (None, json.dumps(ACTIVITY_GP))
}))
self.assert_equals([ACTIVITY_AS],
self.googleplus.get_activities(activity_id='234'))
def test_get_activities_no_extras_to_fetch(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (None, json.dumps({
'items': [ACTIVITY_GP, ACTIVITY_GP],
})),
},
# ACTIVITY_GP doesn't say there are any comments, +1s, or shares (via
# totalItems), so we shouldn't ask for them.
check_unexpected=True))
got = self.googleplus.get_activities(fetch_replies=True, fetch_likes=True,
fetch_shares=True)
self.assert_equals([ACTIVITY_AS, ACTIVITY_AS], got)
def test_get_activities_fetch_extras(self):
self.init()
http_seq = http.HttpMockSequence(
[({'status': '200'}, json.dumps({'items': [item]})) for item in
ACTIVITY_GP_EXTRAS,
# should only ask for these the first time, use the cache for the second
COMMENT_GP, PLUSONER, RESHARER,
ACTIVITY_GP_EXTRAS,
])
self.auth_entity.http = lambda: http_seq
cache = testutil.FakeCache()
self.assert_equals([ACTIVITY_AS_EXTRAS], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
# no new extras, so another request won't fill them in
activity = copy.deepcopy(ACTIVITY_AS)
for field in 'replies', 'plusoners', 'resharers':
activity['object'][field] = {'totalItems': 1}
self.assert_equals([activity], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True, cache=cache))
# TODO: resurrect?
# def test_get_activities_request_etag(self):
# self.init()
# http_seq = http.HttpMockSequence(
# [({'status': '200'}, json.dumps({'items': [item]}))])
# self.auth_entity.http = lambda: http_seq
# resp = self.googleplus.get_activities_response(
# fetch_replies=True, fetch_likes=True, fetch_shares=True)
# self.assertEquals('"my etag"', resp['etag'])
def test_get_activities_response_etag(self):
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (httplib2.Response({'status': 200}),
json.dumps({'etag': '"my etag"'})),
}))
resp = self.googleplus.get_activities_response(
fetch_replies=True, fetch_likes=True, fetch_shares=True)
self.assertEquals('"my etag"', resp['etag'])
def test_get_activities_304_not_modified(self):
"""Requests with matching ETags return 304 Not Modified."""
self.init(requestBuilder=http.RequestMockBuilder({
'plus.activities.list': (httplib2.Response({'status': 304}), '{}'),
}))
self.assert_equals([], self.googleplus.get_activities(
fetch_replies=True, fetch_likes=True, fetch_shares=True))
|
[
"git@ryanb.org"
] |
git@ryanb.org
|
1c65020969dd11db886967e06b349d0c3a57a32b
|
2363cc167aa8bb8f652298025ca183a35f8c655e
|
/skytools/gzlog.py
|
2e601cd64502b172db9f3dd49ebfd1cb673b0558
|
[
"ISC"
] |
permissive
|
zzahti/python-skytools
|
a15d1e0ad6e02ca62d6596bd647766aa6e64eac7
|
b4633810efe9f7640fbc2028005be548fbad7ccb
|
refs/heads/master
| 2022-12-22T16:51:10.312223
| 2020-09-29T10:05:15
| 2020-09-29T10:05:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 742
|
py
|
"""Atomic append of gzipped data.
The point is - if several gzip streams are concatenated,
they are read back as one whole stream.
"""
import gzip
import io
__all__ = ('gzip_append',)
def gzip_append(filename, data, level=6):
"""Append a block of data to file with safety checks."""
# compress data
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, compresslevel=level, mode="w") as g:
g.write(data)
zdata = buf.getvalue()
# append, safely
with open(filename, "ab+", 0) as f:
f.seek(0, 2)
pos = f.tell()
try:
f.write(zdata)
except Exception as ex:
# rollback on error
f.seek(pos, 0)
f.truncate()
raise ex
|
[
"markokr@gmail.com"
] |
markokr@gmail.com
|
cd0066973556580295a757e52687dcc5550a2ef5
|
039f2c747a9524daa1e45501ada5fb19bd5dd28f
|
/ABC157/ABC157e.py
|
ff912cc8831fb416bb93d9b7e2507f2861b4dd5d
|
[
"Unlicense"
] |
permissive
|
yuto-moriizumi/AtCoder
|
86dbb4f98fea627c68b5391bf0cc25bcce556b88
|
21acb489f1594bbb1cdc64fbf8421d876b5b476d
|
refs/heads/master
| 2023-03-25T08:10:31.738457
| 2021-03-23T08:48:01
| 2021-03-23T08:48:01
| 242,283,632
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,522
|
py
|
# ABC157e
#print(bin(toBitSet([1, 2, 3, 2])))
# for i in range(97, 123):
# print(chr(i))
def main():
import sys
sys.setrecursionlimit(10**6)
# 再帰関数を使わない限りPypyで出すこと
class SegmentTree:
#####単位元######
ide_ele = 0
# num:n以上の最小の2のべき乗
def segfunc(self, x, y):
return x | y # 例としてmin関数を設定
def __init__(self, n):
super().__init__()
self.num = 2**(n-1).bit_length() # nは元々の配列の長さ
self.seg = [self.ide_ele]*(2*self.num+1)
def init(self, init_val): # セグ木にしたい配列を渡す
# set_val
for i in range(len(init_val)):
self.seg[i+self.num-1] = init_val[i]
# built
for i in range(self.num-2, -1, -1):
self.seg[i] = self.segfunc(self.seg[2*i+1], self.seg[2*i+2])
def update(self, k, x):
k += self.num-1
self.seg[k] = x
while k:
k = (k-1)//2
self.seg[k] = self.segfunc(self.seg[k*2+1], self.seg[k*2+2])
def query(self, p, q):
if q <= p:
return ide_ele
p += self.num-1
q += self.num-2
res = self.ide_ele
while q-p > 1:
if p & 1 == 0:
res = self.segfunc(res, self.seg[p])
if q & 1 == 1:
res = self.segfunc(res, self.seg[q])
q -= 1
p = p//2
q = (q-1)//2
if p == q:
res = self.segfunc(res, self.seg[p])
else:
res = self.segfunc(self.segfunc(res, self.seg[p]), self.seg[q])
return res
n = int(input())
s = list(input())
q = int(input())
def toBitSet(numberSet):
theSet = 0
for i in numberSet:
theSet = theSet | 1 << i
return theSet
def alphabetToZeroIndexed(alphabet):
return ord(alphabet) - 97
tree = SegmentTree(n)
for i in range(n):
tree.update(i+1, toBitSet([alphabetToZeroIndexed(s[i])]))
for _ in range(q):
a, b, c = input().split()
b = int(b)
if int(a) == 1:
tree.update(b, toBitSet([alphabetToZeroIndexed(c)]))
else:
c = int(c)
print(bin(tree.query(b, c+1)).count('1'))
if __name__ == '__main__':
main()
|
[
"kurvan1112@gmail.com"
] |
kurvan1112@gmail.com
|
0647f0fd2b71a765527da8668a5b0f8b95257b53
|
603519e0d087967caac72cce854dc7f1dfaa5262
|
/bioinformatics stronghold/GC.py
|
ddc7d57d820b55831cc86a23662f68acb74b711c
|
[] |
no_license
|
Morpheus2112/Rosalind-exercise
|
e591570521a12905864cb7e7f72b66816da7ae3a
|
e1047a5f6725e07c8cbf17594bfe4969cbc5d708
|
refs/heads/master
| 2022-07-25T00:07:17.316099
| 2020-02-16T07:18:21
| 2020-02-16T07:18:21
| 240,848,262
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,875
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 31 20:48:47 2017
@author: Memphis
"""
"""
Problem
The GC-content of a DNA string is given by the percentage of symbols in the
string that are 'C' or 'G'. For example, the GC-content of "AGCTATAG" is
37.5%. Note that the reverse complement of any DNA string has the same
GC-content.
DNA strings must be labeled when they are consolidated into a database. A
commonly used method of string labeling is called FASTA format. In this format,
the string is introduced by a line that begins with '>', followed by some
labeling information. Subsequent lines contain the string itself; the first line
to begin with '>' indicates the label of the next string.
In Rosalind's implementation, a string in FASTA format will be labeled by the ID
"Rosalind_xxxx", where"xxxx" denotes a four-digit code between 0000 and 9999.
Given: At most 10 DNA strings in FASTA format (of length at most 1 kbp each).
Return: The ID of the string having the highest GC-content, followed by the
GC-content of that string. Rosalind allows for a default error of 0.001 in all
decimal answers unless otherwise stated; please see the note on absolute error
below.
Sample Dataset
>Rosalind_6404
CCTGCGGAAGATCGGCACTAGAATAGCCAGAACCGTTTCTCTGAGGCTTCCGGCCTTCCC
TCCCACTAATAATTCTGAGG
>Rosalind_5959
CCATCGGTAGCGCATCCTTAGTCCAATTAAGTCCCTATCCAGGCGCTCCGCCGAAGGTCT
ATATCCATTTGTCAGCAGACACGC
>Rosalind_0808
CCACCCTCGTGGTATGGCTAGGCATTCAGGAACCGGAGAACGCTTCAGACCAGCCCGGAC
TGGGAACCTGCGGGCAGTAGGTGGAAT
Sample Output
Rosalind_0808
60.919540
"""
import sys
sys.path.append('../../')
import rosalind_utils
def gc():
records = rosalind_utils.read_fasta("rosalind_gc.txt")
gc_contents = [(desc, rosalind_utils.gc_content(seq)) for desc,seq in records]
max_gc_content = max(gc_contents, key=lambda x: x[1])
print max_gc_content[0]
print max_gc_content[1]*100
gc()
|
[
"palandswd@gmail.com"
] |
palandswd@gmail.com
|
b9b884312eaf1f4e5f6dda63fce03a207fa955bf
|
60c467d4afc722d284df0bd9d3cb5935fec7148e
|
/lib/python2.6/site-packages/mx/Stack/stackbench.py
|
d328678cd23a2f011304080e98664be4e0e62562
|
[
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-warranty-disclaimer",
"eGenix"
] |
permissive
|
Nuevosmedios/quicklearn-env
|
8ab9a1ba6303c97db5946684ad94c01755d4f482
|
6f777c8d842d42ec8006fc8780b278dc9b35409b
|
refs/heads/master
| 2021-01-01T20:12:19.961777
| 2013-03-12T14:18:35
| 2013-03-12T14:18:35
| 7,424,404
| 0
| 1
| null | 2020-07-25T20:32:38
| 2013-01-03T14:54:02
|
Python
|
UTF-8
|
Python
| false
| false
| 2,736
|
py
|
#!/usr/local/bin/python -O
""" stackbench - stack implementation benchmark
Copyright (c) 2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2012, eGenix.com Software GmbH; mailto:info@egenix.com
See the documentation for further information on copyrights,
or contact the author. All Rights Reserved.
"""
import time
from mx import Stack
import UserStack
from sys import argv, exit
try:
numtests, pushes, pops = eval(argv[1]), eval(argv[2]), eval(argv[3])
assert pushes >= pops
except:
print 'usage: stackbench.py <ntests> <pushes> <pops>, where <pushes> >= <pops>'
exit(1)
def test(reps, func):
start_cpu = time.clock()
for i in xrange(reps):
x = func()
return time.clock() - start_cpu
def method1():
x = [] # built-in list
push = x.append
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = x[-1]; del x[-1]
if 0:
def method1a():
x = [] # built-in list
push = x.append
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = x.pop()
def method2():
x = None # built-in tuples
for i in range(pushes): x = ('spam'+'i',x)
for i in range(pops): (top, x) = x
def method3():
s = Stack.Stack() # Stack
push = s.push
pop = s.pop
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = pop()
def method3a():
s = Stack.Stack() # Stack
push = s.push
for i in range(pushes): push('spam'+'i')
t = s.pop_many(pops) # pop all at once
def method3b():
s = Stack.Stack() # Stack
push = s.push
for i in range(pushes): s << ('spam'+'i')
for i in range(pops): top = s >> 1
def method3c():
s = Stack.Stack() # Stack
l = [''] * pushes
for i in range(pushes): l[i] = ('spam'+'i')
s.push_many(l)
s.pop_many(pops)
def method4():
s = UserStack.UserStack() # UserStack
push = s.push
pop = s.pop
for i in range(pushes): push('spam'+'i')
for i in range(pops): top = pop()
print 'list: ', test(numtests, method1) # run func 20 tests
print 'tuples:', test(numtests, method2)
print 'Stack (with push + pop):', test(numtests, method3)
print 'Stack (with push + pop_many):', test(numtests, method3a)
print 'Stack (with << + >>):', test(numtests, method3b)
print 'Stack (with push_many + pop_many):', test(numtests, method3c)
print 'UserStack:', test(numtests, method4)
|
[
"root@web4.web4.nuevosmedios.ws"
] |
root@web4.web4.nuevosmedios.ws
|
a6eab24270bd68ff179f3e93105407ecd98842c0
|
19c764171650292706e3f4197530222e3611f3d5
|
/legislator/platform/platform.py
|
2ba53434c0067b027a728afbd28d712adbdf9f34
|
[
"CC0-1.0"
] |
permissive
|
thewayiam/twly_fileHandler
|
6eaa3d914796c8484a66fe097c0112054b4105fd
|
b5757657621710cc7293f3825202e4c0a0f3051d
|
refs/heads/master
| 2020-05-21T20:06:04.634397
| 2018-09-06T09:07:29
| 2018-09-06T09:07:29
| 11,305,913
| 5
| 1
| null | 2014-05-18T06:53:23
| 2013-07-10T07:58:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
sys.path.append('../../')
import re
import codecs
import db_settings
import ly_common
def personalPlatform(platform, id):
platform = '\n'.join(platform)
c.execute('''
UPDATE legislator_legislatordetail
SET platform = %s
WHERE id = %s
''', (platform, id))
def partyPlatform(platform, ad, party):
platform = '\n'.join(platform)
c.execute('''
UPDATE legislator_legislatordetail
SET platform = %s
WHERE ad = %s AND party = %s AND constituency = 0
''', (platform, ad, party))
conn = db_settings.con()
c = conn.cursor()
ad = 8
sourcetext = codecs.open(u"%d立委政見.txt" % ad, "r", "utf-8")
lines = []
for line in sourcetext.readlines():
line = line.strip()
lines.append(line)
if not line:
uid = ly_common.GetLegislatorId(c, lines[0])
if uid: # if this line is name of legislators
legislator_id = ly_common.GetLegislatorDetailId(c, uid, ad)
else:
print lines[0]
raw_input()
personalPlatform(lines[1:], legislator_id)
lines = []
conn.commit()
print u'8立委政見Succeed'
sourcetext = codecs.open(u"%d政黨政見.txt" % ad, "r", "utf-8")
lines = []
for line in sourcetext.readlines():
line = line.strip()
lines.append(line)
if not line:
partyPlatform(lines[1:], ad, lines[0])
lines = []
conn.commit()
print u'8政黨政見Succeed'
|
[
"twly.tw@gmail.com"
] |
twly.tw@gmail.com
|
0f4ab7fd73baac9575e00a1c630ead91c45359c4
|
638b207f3c7706cb0cb9dd1d6cf112ab91f69837
|
/0x11-python-network_1/10-my_github.py
|
6631d3a4c0e87af20fcaa3bd5fc5ecf014e28088
|
[] |
no_license
|
NasserAbuchaibe/holbertonschool-higher_level_programming
|
c30a066dfd4525e936b4121f930c3a63e6d911d6
|
5b0c11423e11bd9201cc057775c099eb0259f305
|
refs/heads/master
| 2022-12-16T17:15:57.775143
| 2020-09-25T03:00:56
| 2020-09-25T03:00:56
| 259,379,453
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 386
|
py
|
#!/usr/bin/python3
""" Python script that fetches https://intranet.hbtn.io/status
"""
import requests
from sys import argv
from requests.auth import HTTPBasicAuth
if __name__ == "__main__":
""" ok
"""
url = "https://api.github.com/users/"
usr = argv[1]
passw = argv[2]
r = requests.get("{}{}".format(url, usr), auth=(usr, passw))
print(r.json().get('id'))
|
[
"nasser_abuchaibe@hotmail.com"
] |
nasser_abuchaibe@hotmail.com
|
e6eba436be2d42a357bff3df3729c704ace538d6
|
3faeae950e361eb818830ad210f30a6232e5d7f1
|
/wepppy/nodb/mods/locations/location_mixin.py
|
f24103800b72ee919308ab976b101894aeab0f1f
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
rogerlew/wepppy
|
401e6cee524073209a4445c680b43ea0c6102dfc
|
1af4548d725b918b73ee022f2572a63b5194cce0
|
refs/heads/master
| 2023-07-21T12:56:26.979112
| 2023-07-13T23:26:22
| 2023-07-13T23:26:22
| 125,935,882
| 10
| 6
|
NOASSERTION
| 2023-03-07T20:42:52
| 2018-03-20T00:01:27
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,404
|
py
|
# Copyright (c) 2016-2018, University of Idaho
# All rights reserved.
#
# Roger Lew (rogerlew@gmail.com)
#
# The project described was supported by NSF award number IIA-1301792
# from the NSF Idaho EPSCoR Program and by the National Science Foundation.
import os
import json
import csv
from copy import deepcopy
from os.path import join as _join
from os.path import split as _split
from os.path import exists as _exists
import jsonpickle
# from wepppy.all_your_base import RasterDatasetInterpolator
from ...landuse import Landuse
from ...soils import Soils
from ...watershed import Watershed
from ...wepp import Wepp
from wepppy.wepp.soils.utils import read_lc_file, soil_specialization, soil_is_water
from wepppy.wepp.soils.utils import WeppSoilUtil
from ...base import NoDbBase, TriggerEvents
_thisdir = os.path.dirname(__file__)
_data_dir = _join(_thisdir, 'data')
class LocationMixin(object):
@property
def location_doms(self):
data_dir = self.data_dir
lc_dict = read_lc_file(_join(data_dir, self.lc_lookup_fn))
return set([lc_dict[k]['LndcvrID'] for k in lc_dict])
def remap_landuse(self):
data_dir = self.data_dir
with open(_join(data_dir, 'landcover_map.json')) as fp:
lc_map = json.load(fp)
location_doms = self.location_doms
landuse = Landuse.getInstance(self.wd)
landuse.lock()
# noinspection PyBroadException
try:
for topaz_id, dom in landuse.domlc_d.items():
if int(dom) not in location_doms:
landuse.domlc_d[topaz_id] = lc_map[dom]
landuse.dump_and_unlock()
except Exception:
landuse.unlock('-f')
raise
def modify_soils(self, default_wepp_type=None, lc_lookup_fn=None):
data_dir = self.data_dir
wd = self.wd
soils_dir = self.soils_dir
if default_wepp_type is None:
default_wepp_type = self.default_wepp_type
if lc_lookup_fn is None:
lc_lookup_fn = self.lc_lookup_fn
lc_dict = read_lc_file(_join(data_dir, lc_lookup_fn))
with open(_join(data_dir, 'lc_soiltype_map.json')) as fp:
soil_type_map = json.load(fp)
soils = Soils.getInstance(wd)
soils.lock()
# noinspection PyBroadException
try:
domsoil_d = soils.domsoil_d
landuse = Landuse.getInstance(wd)
domlc_d = landuse.domlc_d
_soils = {}
for topaz_id, mukey in domsoil_d.items():
dom = domlc_d[topaz_id]
wepp_type = soil_type_map.get(mukey, default_wepp_type)
replacements = lc_dict[(dom, wepp_type)]
k = '%s-%s-%s' % (mukey, wepp_type, dom)
src_fn = _join(soils_dir, '%s.sol' % mukey)
dst_fn = _join(soils_dir, '%s.sol' % k)
is_water = soil_is_water(src_fn)
if is_water:
_soils[mukey] = deepcopy(soils.soils[mukey])
_soils[mukey].area = 0.0
domsoil_d[topaz_id] = mukey
else:
if k not in _soils:
caller = ':'.join(_split(self._nodb)[-1].split('.')[::-1])
soil_u = WeppSoilUtil(src_fn)
mod_soil = soil_u.to_7778disturbed(replacements, hostname='dev.wepp.cloud')
mod_soil.write(dst_fn)
# soil_specialization(src_fn, dst_fn, replacements, caller=caller)
_soils[k] = deepcopy(soils.soils[mukey])
_soils[k].mukey = k
_soils[k].fname = '%s.sol' % k
_soils[k].area = 0.0
domsoil_d[topaz_id] = k
# need to recalculate the pct_coverages
watershed = Watershed.getInstance(self.wd)
for topaz_id, k in domsoil_d.items():
_soils[k].area += watershed.area_of(topaz_id)
for k in _soils:
coverage = 100.0 * _soils[k].area / watershed.wsarea
_soils[k].pct_coverage = coverage
soils.soils = _soils
soils.domsoil_d = domsoil_d
soils.dump_and_unlock()
except Exception:
soils.unlock('-f')
raise
|
[
"rogerlew@gmail.com"
] |
rogerlew@gmail.com
|
867c6f465a5f4fad71ff4486ff5dc653a0851c06
|
00c9828a8b3b7984cf835f22fca38cf75dce7bae
|
/umongo/frameworks/mongomock.py
|
6a731d95152fe6c01537d4d1c436804f03dd96e6
|
[
"MIT"
] |
permissive
|
KeithGao/umongo
|
d5bc32b8077af3cc409715bfd667af7a5ebfa461
|
04c7fbad4b4e2202d6e2b88c9cafe30fb8f62abe
|
refs/heads/master
| 2021-01-20T10:19:20.213449
| 2017-08-15T06:39:45
| 2017-08-15T06:39:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 250
|
py
|
from .pymongo import PyMongoBuilder
from mongomock.database import Database
# Mongomock aims at working like pymongo
class MongoMockBuilder(PyMongoBuilder):
@staticmethod
def is_compatible_with(db):
return isinstance(db, Database)
|
[
"emmanuel.leblond@gmail.com"
] |
emmanuel.leblond@gmail.com
|
80b1daabb5f26c6cff42c9e3b127effdf3808038
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02715/s161181019.py
|
6199ba7618c16ee2f820a448bd7e395cebbab40e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 528
|
py
|
def modpow(val, n, mod):
ret = 1
while n:
if n & 1:
ret = (ret * val) % mod
val = (val * val) % mod
n = n >> 1
return ret
mod = 10 ** 9 + 7
n, k = map(int, input().split())
my_dict = dict()
ret = 0
for i in range(k, 0, -1):
tmp = modpow(k // i, n, mod)
cnt = 2
while True:
val = i * cnt
if val > k:
break
else:
cnt += 1
tmp -= my_dict[val]
my_dict[i] = tmp
ret += tmp * i % mod
print(ret % mod)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b1c6b964d138e9cb8fad01dbbfce6c7646c358d5
|
d7e77abfa037a5e220980fdf197cb7b3c3b4cb47
|
/calvin/calvinsys/io/display.py
|
939f1eca551e16872b1d3d4640f7c9c2a077654f
|
[
"Apache-2.0"
] |
permissive
|
imriss/calvin-base
|
e82db61cb815644653b6c5f51f7b4cdb0151a211
|
31e450b80ce0c8fedc3042464de7e405ac714953
|
refs/heads/master
| 2020-12-25T14:39:04.250727
| 2017-05-03T17:03:38
| 2017-05-03T17:03:38
| 49,008,248
| 0
| 0
| null | 2016-08-22T17:03:04
| 2016-01-04T16:17:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,374
|
py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.runtime.south.plugins.io.display import display
class Display(object):
"""
Control a display
"""
def __init__(self):
self.display = display.Display()
def enable(self, enable):
"""
Enable/disable display
"""
self.display.enable(enable)
def show(self, text, textcolor, bgcolor):
"""
Display text
"""
self.display.show(text, textcolor, bgcolor)
def show_text(self, text):
self.display.show(text, None, None)
def clear(self):
"""
Clear display
"""
self.display.clear()
def register(node=None, actor=None):
"""
Called when the system object is first created.
"""
return Display()
|
[
"ola.angelsmark@ericsson.com"
] |
ola.angelsmark@ericsson.com
|
27a57f0967b722bc1d77bbcbc21be306922419ab
|
3b9b4049a8e7d38b49e07bb752780b2f1d792851
|
/src/ozone/tools/jhbuild/wayland.jhbuildrc
|
8e7a0e9b548aa70bdc496bdb8d1a8b6cd47eac28
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
webosce/chromium53
|
f8e745e91363586aee9620c609aacf15b3261540
|
9171447efcf0bb393d41d1dc877c7c13c46d8e38
|
refs/heads/webosce
| 2020-03-26T23:08:14.416858
| 2018-08-23T08:35:17
| 2018-09-20T14:25:18
| 145,513,343
| 0
| 2
|
Apache-2.0
| 2019-08-21T22:44:55
| 2018-08-21T05:52:31
| null |
UTF-8
|
Python
| false
| false
| 295
|
jhbuildrc
|
# -*- mode: python -*-
import os
use_local_modulesets = True
moduleset = os.getcwd() + '/wayland.modules'
modules = ['weston']
checkoutroot = os.getcwd() + '/../../../out/wayland/source'
prefix = os.getcwd() +'/../../../out/wayland/root'
autogenargs=''
os.environ['EGL_PLATFORM'] = 'wayland'
|
[
"changhyeok.bae@lge.com"
] |
changhyeok.bae@lge.com
|
39a8ca9fc07790aa0de27c59f2f09cf9d56cab18
|
3f9d28984403cdd84b984d7a82eb6136018966a4
|
/delete_pos_appointment/models/pos_appointment_delete.py
|
de31796cf079c85dbab76ae12d434ca0e6ec781b
|
[] |
no_license
|
joevm018/temasq
|
bee1c69aee4c87de415e66e236b40c28201e2951
|
74f003c6a009716bf26a6438f5ee48e7dfcbb030
|
refs/heads/master
| 2023-03-18T23:42:13.598853
| 2021-03-11T11:35:16
| 2021-03-11T11:35:16
| 346,671,390
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,896
|
py
|
from odoo import api, fields, models,_
from odoo.exceptions import UserError
class PosAppointmentDelete(models.Model):
_name = "pos.appointment.delete"
date_from = fields.Datetime('From', required=True)
date_to = fields.Datetime('To', required=True)
@api.multi
def cancel_archive_appointment(self):
dom = [('date_order', '<=', self.date_to), ('date_order', '>=', self.date_from)]
pos_orders = self.env['pos.order'].search(dom, order='date_order asc')
for pos_ord in pos_orders:
pos_ord.write({'state': 'cancel','active':False})
@api.multi
def cancel_appointment(self):
dom = [('date_order', '<=', self.date_to), ('date_order', '>=', self.date_from)]
pos_orders = self.env['pos.order'].search(dom, order='date_order asc')
pos_orders.action_cancel_pos_appt()
class PosOrder(models.Model):
_inherit = "pos.order"
active = fields.Boolean(default=True)
@api.multi
def action_cancel_pos_appt(self):
for order in self:
for ord_line in order.lines:
# if order.redeemed_gift_id:
if order.redeemed_package_id:
session_avail = self.env['combo.session'].search(
[('order_line_id', '=',ord_line.id),
('order_id', '=', ord_line.order_id.id)], limit=1)
ord_line.write({
'package_card_id': False,
'combo_session_id': session_avail.id,
})
session_avail.write({
'order_line_id': False,
'order_id': False,
'state': 'draft',
'redeemed_date': False,
})
if order.purchased_gift_card_ids:
disc_gift_card_vals = {
'purchased_date': False,
'partner_id': False,
'gift_order_id': False,
'state': 'new',
'discount_gift_card_amount': 0.0,
'remaining_amount': 0.0,
}
order.purchased_gift_card_ids.write(disc_gift_card_vals)
if order.purchased_package_card_ids:
package_card_vals = {
'purchased_date': False,
'partner_id': False,
'package_order_id': False,
'state': 'new',
'package_card_amount': 0.0,
'combo_session_ids': False,
}
order.purchased_package_card_ids.write(package_card_vals)
if not order.invoice_id and not order.statement_ids and not order.picking_id:
order.write({'state': 'cancel'})
|
[
"esreejishnu@gmail.com"
] |
esreejishnu@gmail.com
|
c0166a9998e05c2bd86f74fd787ec8923c9a7f9c
|
f62fd455e593a7ad203a5c268e23129473d968b6
|
/mistral-4.0.2/mistral/engine/default_executor.py
|
b360cc216ed783679d26b412e11415a5f31d00e3
|
[
"Apache-2.0"
] |
permissive
|
MinbinGong/OpenStack-Ocata
|
5d17bcd47a46d48ff9e71e2055f667836174242f
|
8b7650128cfd2fdf5d6c8bc4613ac2e396fb2fb3
|
refs/heads/master
| 2021-06-23T05:24:37.799927
| 2017-08-14T04:33:05
| 2017-08-14T04:33:05
| 99,709,985
| 0
| 2
| null | 2020-07-22T22:06:22
| 2017-08-08T15:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 5,258
|
py
|
# Copyright 2013 - Mirantis, Inc.
# Copyright 2016 - Brocade Communications Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from osprofiler import profiler
from mistral.actions import action_factory as a_f
from mistral.engine import base
from mistral.engine.rpc_backend import rpc
from mistral import exceptions as exc
from mistral.utils import inspect_utils as i_u
from mistral.workflow import utils as wf_utils
LOG = logging.getLogger(__name__)
class DefaultExecutor(base.Executor):
    """Executor that runs Mistral actions in-process and reports results
    back to the engine service over RPC."""
    def __init__(self):
        # RPC client used to deliver action results to the engine.
        self._engine_client = rpc.get_engine_client()
    @profiler.trace('executor-run-action', hide_args=True)
    def run_action(self, action_ex_id, action_class_str, attributes,
                   action_params, safe_rerun, redelivered=False):
        """Runs action.
        :param action_ex_id: Action execution id.
        :param action_class_str: Path to action class in dot notation.
        :param attributes: Attributes of action class which will be set to.
        :param action_params: Action parameters.
        :param safe_rerun: Tells if given action can be safely rerun.
        :param redelivered: Tells if given action was run before on another
            executor.
        """
        def send_error_back(error_msg):
            # Report the error to the engine when the action execution id
            # is known; otherwise hand the error Result back to the caller.
            error_result = wf_utils.Result(error=error_msg)
            if action_ex_id:
                self._engine_client.on_action_complete(
                    action_ex_id,
                    error_result
                )
                return None
            return error_result
        # A redelivered message means another executor may already have run
        # (part of) this action; only safe-rerun actions may proceed.
        if redelivered and not safe_rerun:
            msg = (
                "Request to run action %s was redelivered, but action %s"
                " cannot be re-run safely. The only safe thing to do is fail"
                " action."
                % (action_class_str, action_class_str)
            )
            return send_error_back(msg)
        action_cls = a_f.construct_action_class(action_class_str, attributes)
        # Instantiate action.
        try:
            action = action_cls(**action_params)
        except Exception as e:
            msg = ("Failed to initialize action %s. Action init params = %s."
                   " Actual init params = %s. More info: %s"
                   % (action_class_str, i_u.get_arg_list(action_cls.__init__),
                      action_params.keys(), e))
            LOG.warning(msg)
            return send_error_back(msg)
        # Run action.
        try:
            result = action.run()
            # Note: it's made for backwards compatibility with already
            # existing Mistral actions which don't return result as
            # instance of workflow.utils.Result.
            if not isinstance(result, wf_utils.Result):
                result = wf_utils.Result(data=result)
        except Exception as e:
            msg = ("Failed to run action [action_ex_id=%s, action_cls='%s',"
                   " attributes='%s', params='%s']\n %s"
                   % (action_ex_id, action_cls, attributes, action_params, e))
            LOG.exception(msg)
            return send_error_back(msg)
        # Send action result.
        try:
            if action_ex_id and (action.is_sync() or result.is_error()):
                self._engine_client.on_action_complete(
                    action_ex_id,
                    result,
                    async_=True
                )
        except exc.MistralException as e:
            # In case of a Mistral exception we can try to send error info to
            # engine because most likely it's not related to the infrastructure
            # such as message bus or network. One known case is when the action
            # returns a bad result (e.g. invalid unicode) which can't be
            # serialized.
            msg = ("Failed to call engine's on_action_complete() method due"
                   " to a Mistral exception"
                   " [action_ex_id=%s, action_cls='%s',"
                   " attributes='%s', params='%s']\n %s"
                   % (action_ex_id, action_cls, attributes, action_params, e))
            LOG.exception(msg)
            return send_error_back(msg)
        except Exception as e:
            # If it's not a Mistral exception all we can do is only
            # log the error.
            msg = ("Failed to call engine's on_action_complete() method due"
                   " to an unexpected exception"
                   " [action_ex_id=%s, action_cls='%s',"
                   " attributes='%s', params='%s']\n %s"
                   % (action_ex_id, action_cls, attributes, action_params, e))
            LOG.exception(msg)
        return result
|
[
"gongwayne@hotmail.com"
] |
gongwayne@hotmail.com
|
6e628f32e0906b40690193765db3d1b9a953f010
|
59b3dce3c770e70b2406cc1dd623a2b1f68b8394
|
/python_1/lessons/funcalls.py
|
95393a4a7b9cdb9de7b774a53f683151fce9c03d
|
[] |
no_license
|
patrickbeeson/python-classes
|
04ed7b54fc4e1152a191eeb35d42adc214b08e39
|
b5041e71badd1ca2c013828e3b2910fb02e9728f
|
refs/heads/master
| 2020-05-20T07:17:36.693960
| 2015-01-23T14:41:46
| 2015-01-23T14:41:46
| 29,736,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 305
|
py
|
#!/usr/local/bin/python3
""" Take user input, convert to float, and print
out the number to two decimal places, with commas. """
import funcs
# Loop until the user submits an empty line.
while True:
    inval = input("Enter a number: ")
    if not inval:
        break
    number = float(inval)
    # funcs.commareal presumably inserts thousands separators into the
    # formatted string -- defined in the local ``funcs`` module; verify there.
    print(funcs.commareal("{0:.2f}".format(number)))
|
[
"patrickbeeson@gmail.com"
] |
patrickbeeson@gmail.com
|
b4a2b40674ce9d708d9bdbb27f367823166b87d9
|
92658cf5178f88e69ae243310425658a8df36c0d
|
/video_distributor/video_distributor/urls.py
|
b6f3128cc703215315dd3c1a7253763f8e878149
|
[] |
no_license
|
SeedofWind-demo2017/NSQ_Stress_Test
|
dc3df4126c0381c90f13631b2ba067530ab2e06a
|
9cd34588299effcf29a32affb4bc6714db0e26ac
|
refs/heads/master
| 2021-01-19T23:41:43.705136
| 2017-05-08T02:15:28
| 2017-05-08T02:15:28
| 89,020,247
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 532
|
py
|
from django.conf.urls import include, url
from django.contrib import admin
# Route table for the video distributor dashboard.  Views are referenced by
# dotted-path strings (pre-Django-1.10 style); all of them live in the
# ``video`` app.
urlpatterns = [
    # Examples:
    # url(r'^blog/', include('blog.urls')),
    url(r'^$', 'video.views.home', name='home'),
    url(r'^stats/', 'video.views.stats', name='stats'),
    # NOTE(review): update_table reuses the ``home`` view -- confirm intended.
    url(r'^update_table/', 'video.views.home', name='update_table'),
    url(r'^update_charts/', 'video.views.update_charts', name='update_charts'),
    url(r'^update_stats/', 'video.views.update_stats', name='update_stats'),
    url(r'^admin/', include(admin.site.urls)),
]
|
[
"cdzengpeiyun@gmail.com"
] |
cdzengpeiyun@gmail.com
|
c311a00b768166835f67364b125edcaba5999485
|
107e62a03254c9ebe2e1830977a47861633b0d33
|
/Meminfo.py
|
7d05844c1597727d24d601a90214c8582fe144eb
|
[] |
no_license
|
prasanna-ranganathan/mypython
|
bb798c0782cfb79a27b0730e924921b802da2a44
|
25fa93602e2465ec6ccb0c3ff30a2bbf90da96e4
|
refs/heads/master
| 2021-06-03T18:35:31.129399
| 2016-08-28T14:21:44
| 2016-08-28T14:22:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 437
|
py
|
#!/usr/bin/python
from __future__ import print_function
from collections import OrderedDict
def Meminfo():
    """Parse /proc/meminfo into an OrderedDict of field name -> value string.

    Each line of /proc/meminfo looks like ``MemTotal:   16314508 kB``; the
    text before the first colon becomes the key and the remainder (stripped)
    becomes the value, e.g. ``'16314508 kB'``.
    """
    info = OrderedDict()
    with open('/proc/meminfo') as handle:
        for raw_line in handle:
            fields = raw_line.split(':')
            info[fields[0].strip()] = fields[1].strip()
    return info
if __name__ == '__main__':
    meminfo = Meminfo()
    print('Total memory: {0}'.format(meminfo['MemTotal']))
    print('Free memory: {0}'.format(meminfo['MemFree']))
|
[
"prassanna.mit@gmail.com"
] |
prassanna.mit@gmail.com
|
5da24ee2ef65a48b724f63e8744263375abd505d
|
d190750d6cb34e9d86ae96724cf4b56a2f57a74a
|
/tests/r/test_wage.py
|
9c3f558601d3b21c414e2b6f21cf0f7e147870e1
|
[
"Apache-2.0"
] |
permissive
|
ROAD2018/observations
|
a119f61a48213d791de0620804adb8d21c2ad9fb
|
2c8b1ac31025938cb17762e540f2f592e302d5de
|
refs/heads/master
| 2021-09-24T04:28:02.725245
| 2018-09-16T23:06:30
| 2018-09-16T23:06:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 502
|
py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.wage import wage
def test_wage():
    """Test module wage.py by downloading
    wage.csv and testing shape of
    extracted data has 3000 rows and 12 columns
    """
    test_path = tempfile.mkdtemp()
    try:
        x_train, metadata = wage(test_path)
        # Original code wrapped this in a bare ``except`` that ended with
        # ``raise()`` -- calling ``raise`` like a function raises a
        # TypeError that masks the real assertion failure -- and it leaked
        # the temp dir on success.  ``try/finally`` cleans up on every path
        # and lets the genuine exception propagate unchanged.
        assert x_train.shape == (3000, 12)
    finally:
        shutil.rmtree(test_path)
|
[
"dustinviettran@gmail.com"
] |
dustinviettran@gmail.com
|
7a128e797059ae648a4e283127d383f99c3ebbac
|
360e1f69f4c0923c5d79bc82aa33c0fd4e80b71e
|
/LINKED_LISTALLMETHODS/insertionsortinsinglyll.py
|
75fc859bccc7a3af38a5fc3e4f0072b5a2fa8761
|
[] |
no_license
|
Vijay1234-coder/data_structure_plmsolving
|
04e52fe6c918313e13d39107a2ded8b47645bb12
|
d449b266295d1ae55613cdcfd9b22ad9cee3dfbe
|
refs/heads/master
| 2023-08-01T00:55:28.825972
| 2021-09-12T15:20:12
| 2021-09-12T15:20:12
| 387,782,783
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,085
|
py
|
class Node:
    """One cell of a singly linked list: a payload plus a successor link."""
    def __init__(self, data):
        self.data = data
        self.next_node = None
class LinkedList:
    """Minimal singly linked list with append, display, and insertion sort."""
    def __init__(self):
        self.head = None
    def display(self):
        """Print the list as 'v1 --->v2 --->...' or a message when empty."""
        if self.head is None:
            print("Linked List is empty!")
            return
        cursor = self.head
        while cursor is not None:
            print(cursor.data, "--->", end="")
            cursor = cursor.next_node
    def insert_atEnd(self, data):
        """Append a new node holding *data* at the tail (O(n) walk)."""
        tail = Node(data)
        if self.head is None:
            self.head = tail
            return
        cursor = self.head
        while cursor.next_node is not None:
            cursor = cursor.next_node
        cursor.next_node = tail
    def sort(self, head):
        """Relink the chain starting at *head* in ascending order.

        Classic linked-list insertion sort: each node is spliced into a
        sorted chain hung off a dummy anchor.  Returns the new head; the
        caller is expected to reassign ``self.head`` with the result.
        """
        anchor = Node(0)
        node = head
        while node is not None:
            remaining = node.next_node
            before, after = anchor, anchor.next_node
            # Walk forward while the sorted chain's values are <= the new
            # node's value, then splice the node in between.
            while after is not None and after.data <= node.data:
                before, after = after, after.next_node
            node.next_node = after
            before.next_node = node
            node = remaining
        return anchor.next_node
# Demo: build an unsorted list, show it, sort it, show it again.
l = LinkedList()
l.insert_atEnd(100)
l.insert_atEnd(40)
l.insert_atEnd(50)
l.insert_atEnd(2)
l.display()
print(" ")
# sort() returns the new head node, so head must be reassigned explicitly.
l.head = l.sort(l.head)
l.display()
|
[
"77201164+Vijay1234-coder@users.noreply.github.com"
] |
77201164+Vijay1234-coder@users.noreply.github.com
|
4bc0d6e4b5b52679568e0bb0c742c95e4961f9f0
|
bd4734d50501e145bc850426c8ed595d1be862fb
|
/6Kyu - Format a string of names like 'Bart, Lisa & Maggie'.py
|
c0188e848a5e41d60affcf353ae989beba6063b6
|
[] |
no_license
|
OrdinaryCoder00/CODE-WARS-PROBLEMS-SOLUTIONS
|
f61ff9e5268305519ffeed4964589289f4148cfd
|
5711114ddcc6a5f22f143d431b2b2e4e4e8ac9fb
|
refs/heads/master
| 2021-10-23T09:09:45.670850
| 2019-03-16T13:24:17
| 2019-03-16T13:24:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 396
|
py
|
def namelist(names):
    """Format names like ``"Bart, Lisa & Maggie"``.

    ``names`` is a list of dicts such as ``{'name': 'Bart'}``; the
    last-inserted value of each dict is used (matching what the original
    ``dict.popitem()`` call returned on Python 3.7+).

    Fixes a defect in the original implementation: ``popitem()``
    destructively removed the entry from each caller-supplied dict.  This
    version reads the values without mutating the input.

    Returns "" for an empty list and the bare name for a single entry.
    """
    values = [list(d.values())[-1] for d in names]
    if not values:
        return ""
    if len(values) == 1:
        return values[0]
    # All but the last name are comma-separated; the last is joined by '&'.
    return ", ".join(values[:-1]) + " & " + values[-1]
|
[
"noreply@github.com"
] |
OrdinaryCoder00.noreply@github.com
|
187ff3aa82fc1514d839eaa4238c98715236c02a
|
08d17ddeb5713d8e7a4ee01054fcce78ed7f5191
|
/tensorflow/python/autograph/pyct/static_analysis/liveness_test.py
|
f14b1a3e79de80d2218366e086d649fa5493be4f
|
[
"Apache-2.0"
] |
permissive
|
Godsinred/tensorflow
|
9cd67e1088ad8893265651ad4a5c45a6640b6c96
|
45100d5f55d7cba15bffcd91bf521ed37daf7bca
|
refs/heads/master
| 2020-04-25T19:44:53.669366
| 2019-02-28T01:54:55
| 2019-02-28T02:59:15
| 173,030,955
| 2
| 0
|
Apache-2.0
| 2019-02-28T03:03:41
| 2019-02-28T03:03:41
| null |
UTF-8
|
Python
| false
| false
| 7,335
|
py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for liveness module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.platform import test
class LivenessTest(test.TestCase):
  """Checks the LIVE_VARS_IN / LIVE_VARS_OUT annotations that
  liveness.resolve attaches to statements of a parsed function."""
  def _parse_and_analyze(self, test_fn):
    # Parse test_fn, resolve qualified names and activity, build the CFG,
    # then run the liveness analysis so LIVE_VARS_* annotations exist.
    node, source = parser.parse_entity(test_fn)
    entity_info = transformer.EntityInfo(
        source_code=source,
        source_file=None,
        namespace={},
        arg_values=None,
        arg_types=None,
        owner_type=None)
    node = qual_names.resolve(node)
    ctx = transformer.Context(entity_info)
    node = activity.resolve(node, ctx)
    graphs = cfg.build(node)
    liveness.resolve(node, ctx, graphs)
    return node
  def assertHasLiveOut(self, node, expected):
    # ``expected`` may be a single name or a tuple of names; it is compared
    # against the stringified LIVE_VARS_OUT annotation as a set.
    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
    live_out_strs = set(str(v) for v in live_out)
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(live_out_strs, set(expected))
  def assertHasLiveIn(self, node, expected):
    # Same as assertHasLiveOut, for the LIVE_VARS_IN annotation.
    live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
    live_in_strs = set(str(v) for v in live_in)
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(live_in_strs, set(expected))
  def test_live_out_stacked_if(self):
    def test_fn(x, a):
      if a > 0:
        x = 0
      if a > 1:
        x = 1
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], ('a', 'x'))
    self.assertHasLiveOut(fn_body[1], 'x')
  def test_live_out_stacked_if_else(self):
    def test_fn(x, a):
      if a > 0:
        x = 0
      if a > 1:
        x = 1
      else:
        x = 2
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], 'a')
    self.assertHasLiveOut(fn_body[1], 'x')
  def test_live_out_for_basic(self):
    def test_fn(x, a):
      for i in range(a):
        x += i
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], 'x')
  def test_live_out_attributes(self):
    def test_fn(x, a):
      if a > 0:
        x.y = 0
      return x.y
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
  def test_live_out_nested_functions(self):
    def test_fn(a, b):
      if b:
        a = []
      def foo():
        return a
      foo()
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], 'a')
  def test_live_out_nested_functions_isolation(self):
    def test_fn(b):
      if b:
        a = 0  # pylint:disable=unused-variable
      def child():
        max(a)  # pylint:disable=used-before-assignment
        a = 1
        return a
      child()
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], 'max')
  def test_live_out_deletion(self):
    def test_fn(x, y, a):
      for _ in a:
        if x:
          del y
        else:
          y = 0
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveOut(fn_body[0], ())
  def test_live_in_stacked_if(self):
    def test_fn(x, a, b, c):
      if a > 0:
        x = b
      if c > 1:
        x = 0
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
    self.assertHasLiveIn(fn_body[1], ('c', 'x'))
  def test_live_in_stacked_if_else(self):
    def test_fn(x, a, b, c, d):
      if a > 1:
        x = b
      else:
        x = c
      if d > 0:
        x = 0
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'd'))
    self.assertHasLiveIn(fn_body[1], ('d', 'x'))
  def test_live_in_for_basic(self):
    def test_fn(x, y, a):
      for i in a:
        x = i
        y += x
        z = 0
      return y, z
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
  def test_live_in_for_nested(self):
    def test_fn(x, y, a):
      for i in a:
        for j in i:
          x = i
          y += x
          z = j
      return y, z
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
  def test_live_in_deletion(self):
    def test_fn(x, y, a):
      for _ in a:
        if x:
          del y
        else:
          y = 0
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    self.assertHasLiveIn(fn_body[0], ('a', 'x', 'y'))
  def test_live_in_generator_comprehension(self):
    def test_fn(y):
      if all(x for x in y):
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    # Py2 leaks the comprehension variable into the enclosing scope.
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('all', 'x', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('all', 'y'))
  def test_live_in_list_comprehension(self):
    def test_fn(y):
      if [x for x in y]:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('x', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('y',))
  def test_live_in_set_comprehension(self):
    def test_fn(y):
      if {x for x in y}:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('x', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('y',))
  def test_live_in_dict_comprehension(self):
    def test_fn(y):
      if {k: v for k, v in y}:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body[0].body
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('k', 'v', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('y',))
if __name__ == '__main__':
  test.main()
|
[
"gardener@tensorflow.org"
] |
gardener@tensorflow.org
|
078058580720b4b789bb263b2edeb244ccf671b5
|
80a3e654cf33e5c86410e207e3a28ed160adbd75
|
/list_questions/listodd.py
|
e300ae82628a87b0fa9ece8ef36a79bbb870762e
|
[] |
no_license
|
Rinkikumari19/python
|
f3f6e57ca39d7d6fe4e110264eb5685be2441f66
|
2b98f4bac313725c2716cc8a60440336d28acba4
|
refs/heads/master
| 2022-11-26T06:47:34.161251
| 2020-08-02T18:37:54
| 2020-08-02T18:37:54
| 284,518,849
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
# Counts, sums, and averages the odd and even values in ``elements``.
# Earlier drafts are kept below as comments; Hinglish comments translated.
# elements = [23,14,56,12,19,9,15,25,31,42,43]
# i = 0
# odd = 0
# even = 0
# while i < len(elements):
#     if elements[i]%2==0:
#         even = even + 1
#     else:
#         odd = odd + 1
#     i = i + 1
# print(even)
# print(odd)
# (translated) This code prints how many odd and even numbers there are.
# elements = [23,14,56,12,19,9,15,25,31,42,43]
# i = 0
# odd_sum = 0
# even_sum = 0
# ave = 0
# ave1 = 0
# while i < len(elements):
#     if elements[i] % 2 == 0:
#         even_sum = even_sum + elements[i]
#         ave = ave + 1
#     else:
#         odd_sum = odd_sum + elements[i]
#         ave1 = ave1 + 1
#     i = i + 1
# print(even_sum/ave)
# print(odd_sum/ave1)
# (translated) This code prints the averages of the even and odd sums.
elements = [23,14,56,12,19,9,15,25,31,42,43]
i = 0
sum_odd = 0
sum_even = 0
ave = 0  # count of even values
ave1 = 0  # count of odd values
while i < len(elements):
    if elements[i]%2==0:
        sum_even=sum_even+elements[i]
        ave=ave+1
    else:
        sum_odd=sum_odd+elements[i]
        ave1=ave1+1
    i = i + 1
print("odd number ka count:",ave1)
print("even number ka count:",ave)
print("sare number ka count:",ave1+ave)
print("odd number ka sum:",sum_odd)
print("even number ka sum:",sum_even)
print("sare number ka sum:",sum_odd+sum_even)
print("odd number ka avarage:",sum_odd/ave1)
print("even number ka avarage:",sum_even/ave)
print("sare numbers ka avarage:",(sum_even+sum_odd)/i)
# (translated) This code prints everything above.
|
[
"ravina18@navgurukul.org"
] |
ravina18@navgurukul.org
|
0ec110b96e33c6f65e992debda2c5d9e67ecba6d
|
8afb5afd38548c631f6f9536846039ef6cb297b9
|
/MY_REPOS/INTERVIEW-PREP-COMPLETE/notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/arithmetic_analysis/secant_method.py
|
45bcb185fc3ec25a7773d736029375983b9a12c6
|
[
"MIT"
] |
permissive
|
bgoonz/UsefulResourceRepo2.0
|
d87588ffd668bb498f7787b896cc7b20d83ce0ad
|
2cb4b45dd14a230aa0e800042e893f8dfb23beda
|
refs/heads/master
| 2023-03-17T01:22:05.254751
| 2022-08-11T03:18:22
| 2022-08-11T03:18:22
| 382,628,698
| 10
| 12
|
MIT
| 2022-10-10T14:13:54
| 2021-07-03T13:58:52
| null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
"""
Implementing Secant method in Python
Author: dimgrichr
"""
from math import exp
def f(x: float) -> float:
"""
>>> f(5)
39.98652410600183
"""
return 8 * x - 2 * exp(-x)
def secant_method(lower_bound: float, upper_bound: float, repeats: int) -> float:
"""
>>> secant_method(1, 3, 2)
0.2139409276214589
"""
x0 = lower_bound
x1 = upper_bound
for i in range(0, repeats):
x0, x1 = x1, x1 - (f(x1) * (x1 - x0)) / (f(x1) - f(x0))
return x1
if __name__ == "__main__":
print(f"Example: {secant_method(1, 3, 2)}")
|
[
"bryan.guner@gmail.com"
] |
bryan.guner@gmail.com
|
701220fce2f2348ed36924cb47be6003d1eb8267
|
bb767bfc9db2b0ab7f24d3561b168a829c4eb0bc
|
/1st_Year/1st_Semestre/Fpro/Python/saved files/rm_letter_rev.py
|
6caffafdd7696fa4adee6e2c1bdb8170060842c0
|
[] |
no_license
|
Hugomguima/FEUP
|
7e6e0faf5408d698a34c3b5aed977b20aa76c067
|
f26887e2b8e92e41ae5050515cd0b3cdf94d6476
|
refs/heads/master
| 2023-06-09T05:21:38.897094
| 2021-06-29T17:00:01
| 2021-06-29T17:00:01
| 272,567,282
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri Nov 23 13:31:14 2018
@author: Hugo
"""


def rm_letter_rev(l, astr):
    """Return *astr* reversed, with every occurrence of letter *l* removed.

    Filtering before or after reversing yields the same string, so the
    whole operation collapses to one comprehension plus a join.
    """
    return "".join(ch for ch in reversed(astr) if ch != l)
|
[
"50011557+Hugomguima@users.noreply.github.com"
] |
50011557+Hugomguima@users.noreply.github.com
|
b47db36712832d90f04b309c0c0c0541b67d3816
|
f211382033cbedd7304ad640f9bb869be61fff34
|
/parkstay/migrations/0006_auto_20161114_0840.py
|
6867960ef185c545daed8acd144be67f94903915
|
[
"Apache-2.0"
] |
permissive
|
dbca-wa/parkstay_bs
|
0b2f918f645dfbd1470a40934aae5304f4333942
|
ce1b6cd75fb3021863005097c5ce2c0e2dbf3061
|
refs/heads/master
| 2023-06-27T14:27:01.658254
| 2023-06-16T07:55:14
| 2023-06-16T07:55:14
| 234,250,398
| 2
| 3
|
NOASSERTION
| 2023-04-28T01:38:48
| 2020-01-16T06:24:41
|
Vue
|
UTF-8
|
Python
| false
| false
| 418
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.10 on 2016-11-14 00:40
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Renames the BookingRange model to CampgroundBookingRange."""
    dependencies = [
        ('parkstay', '0005_auto_20161111_1302'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='BookingRange',
            new_name='CampgroundBookingRange',
        ),
    ]
|
[
"ndwigabrian@gmail.com"
] |
ndwigabrian@gmail.com
|
b7b0d94145519dfe8dc76e850870731e3dbc0b0a
|
7f92c2fc131ca637d8b7c2a4dbba4b974884e786
|
/lab6/plottingScripts/plotting3_b.py
|
5a1f42e0d764db963bd81041c6ba111e9de86d38
|
[] |
no_license
|
byronwasti/CircuitsLabs
|
2c5694f07a59adedddde361d0a85a690a83e096b
|
be1227c504ed1a2b81b6d670cbaa45d4b8be8e17
|
refs/heads/master
| 2020-05-23T11:15:14.853587
| 2017-09-03T18:53:50
| 2017-09-03T18:53:50
| 80,369,111
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 979
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import csv
def getData(FILENAME):
    """Load up to the first 75 data rows of a two-column CSV.

    The first row is treated as a header and skipped; rows beyond index 75
    are ignored.  Returns ``(x, y)`` as numpy arrays, with the second
    column negated (the measurement convention used by these experiments).
    """
    xs, ys = [], []
    with open(FILENAME, 'r') as handle:
        for idx, record in enumerate(csv.reader(handle)):
            if idx == 0 or idx > 75:
                continue
            xs.append(float(record[0]))
            ys.append(-float(record[1]))
    return np.array(xs), np.array(ys)
def plot():
    """Label the axes, title the figure, and show the active pyplot figure.

    Relies on the caller having already drawn data onto the current axes.
    """
    plt.xlabel("Input Current (A)")
    plt.ylabel("Output Current (A)")
    plt.title("nMOS Current Divider (b)")
    plt.legend()
    plt.show()
if __name__ == "__main__":
iin, iout = getData("../data/experiment3_current_divider_b_3.csv")
theoretical = [ i/2 for i in iin ]
plt.plot(iin, iout, '.', label="Experimental Data")
plt.plot(iin, theoretical, '-', label="Theoretical")
fit = np.polyfit(iin, iout, 1)
plt.text(0.006, 0.002, "Experimental Divider Ratio: %e\nTheoretical Divider Ratio: 0.5" % fit[0])
plot()
|
[
"byron.wasti@gmail.com"
] |
byron.wasti@gmail.com
|
f811948d04902553e78577fab2df24ffa33c479b
|
33febf8b617ef66d7086765f1c0bf6523667a959
|
/probpy/learn/conjugate/categorical.py
|
bb3b32c9b459ebab2743640c68665cdb00c0b78b
|
[] |
no_license
|
JonasRSV/probpy
|
857201c7f122461463b75d63e5c688e011615292
|
5203063db612b2b2bc0434a7f2a02c9d2e27ed6a
|
refs/heads/master
| 2022-07-07T06:17:44.504570
| 2020-04-15T14:52:20
| 2020-04-15T14:52:20
| 245,820,195
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,361
|
py
|
from probpy.core import RandomVariable
from typing import Tuple
from probpy.distributions import categorical, dirichlet
from .identification import _check_no_none_parameters, _check_only_none_is
import numpy as np
import numba
class CategoricalDirichlet_PPrior:
    """Conjugate prior for categorical likelihood with unknown probability"""
    @staticmethod
    def is_conjugate(likelihood: RandomVariable, prior: RandomVariable):
        # Conjugacy holds when the prior is a fully specified Dirichlet and
        # the only unknown likelihood parameter is ``probabilities``.
        if prior.cls is dirichlet \
                and _check_no_none_parameters(prior) \
                and _check_only_none_is(likelihood, [categorical.probabilities]):
            return True
        return False
    @staticmethod
    def fast_loop(data: np.ndarray, categories: int):
        # Histogram of category indices: result[c] = number of c's in data.
        result = np.zeros(categories)
        for d in data:
            result[d] += 1
        return result
    @staticmethod
    def posterior(data: np.ndarray, _: RandomVariable, prior: RandomVariable) -> RandomVariable:
        """Dirichlet posterior: prior alpha plus observed category counts."""
        data = np.array(data[0])
        if data.ndim == 0: data = data.reshape(-1)
        prior_alpha = prior.parameters[dirichlet.alpha].value
        if data.ndim == 1:
            # 1-D data holds category indices; tally them per category.
            posterior_alpha = prior_alpha + CategoricalDirichlet_PPrior.fast_loop(data, prior_alpha.size)
        else:
            # 2-D data appears to be one-hot encoded (summing rows yields
            # per-category counts) -- TODO confirm with callers.
            posterior_alpha = prior_alpha + data.sum(axis=0)
        return dirichlet.med(alpha=posterior_alpha)
|
[
"jonas@valfridsson.net"
] |
jonas@valfridsson.net
|
7f2ff1e453a578c8abf2536555d54b86452d42a3
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/13134040.py
|
2692f3e9b3f0d0a78d49e95833ebb37e89575d13
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,853
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/13134040.py generated: Fri, 27 Mar 2015 15:48:00
#
# Event Type: 13134040
#
# ASCII decay Descriptor: {[[B_s0]nos -> (J/psi(1S) -> p+ p~-) (f_0(980) -> pi+ pi-)]cc, [[B_s0]os -> (J/psi(1S) -> p+ p~-) (f_0(980) -> pi- pi+)]cc}
#
# NOTE: auto-generated Gauss/DecFiles option file -- regenerate from the
# .dec file rather than hand-editing the values below.
from Configurables import Generation
Generation().EventType = 13134040
Generation().SampleGenerationTool = "SignalRepeatedHadronization"
from Configurables import SignalRepeatedHadronization
Generation().addTool( SignalRepeatedHadronization )
Generation().SignalRepeatedHadronization.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Bs_Jpsif0,pp=DecProdCut.dec"
Generation().SignalRepeatedHadronization.CutTool = "DaughtersInLHCb"
# PDG codes 531/-531 are the B_s0 / anti-B_s0 signal particles.
Generation().SignalRepeatedHadronization.SignalPIDList = [ 531,-531 ]
# Ad-hoc particle gun code
from Configurables import ParticleGun
pgun = ParticleGun("ParticleGun")
pgun.SignalPdgCode = 531
pgun.DecayTool = "EvtGenDecay"
pgun.GenCutTool = "DaughtersInLHCb"
from Configurables import FlatNParticles
pgun.NumberOfParticlesTool = "FlatNParticles"
pgun.addTool( FlatNParticles , name = "FlatNParticles" )
from Configurables import MomentumSpectrum
pgun.ParticleGunTool = "MomentumSpectrum"
pgun.addTool( MomentumSpectrum , name = "MomentumSpectrum" )
pgun.MomentumSpectrum.PdgCodes = [ 531,-531 ]
pgun.MomentumSpectrum.InputFile = "$PGUNSDATAROOT/data/Ebeam4000GeV/MomentumSpectrum_531.root"
pgun.MomentumSpectrum.BinningVariables = "pteta"
pgun.MomentumSpectrum.HistogramPath = "h_pteta"
from Configurables import BeamSpotSmearVertex
pgun.addTool(BeamSpotSmearVertex, name="BeamSpotSmearVertex")
pgun.VertexSmearingTool = "BeamSpotSmearVertex"
pgun.EventType = 13134040
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
b762af412c6c4bda55851994e6fb8833f90b16e2
|
45c12e6703e621d32ec46137d3c5c65d02d0a2c2
|
/08. On Time for the Exam.py
|
fcea6f82c81e51c2b1c97cc66a937263149929de
|
[] |
no_license
|
antondelchev/Conditional-Statements-Advanced---Exercise
|
ce6511e7ad6a06ecebeddc6c758c4dcaa76bf062
|
e74a3ee6353d37035955686eb2d334f423758fef
|
refs/heads/main
| 2023-02-22T12:10:18.628103
| 2021-01-25T20:33:25
| 2021-01-25T20:33:25
| 332,012,060
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
# Reads the scheduled exam time and the arrival time (hours then minutes)
# and reports Late / On time / Early plus the difference, formatted as
# plain minutes (< 1 hour) or H:MM.
exam_hour = int(input())
exam_minutes = int(input())
arrival_hour = int(input())
arrival_minutes = int(input())
# Convert both clock times to absolute minutes for easy comparison.
converted_mins_exam = exam_hour * 60 + exam_minutes
converted_mins_arrival = arrival_hour * 60 + arrival_minutes
if converted_mins_exam < converted_mins_arrival:
    # Arrived after the exam started.
    total_mins_difference = converted_mins_arrival - converted_mins_exam
    hours_over = total_mins_difference // 60
    mins_over = total_mins_difference % 60
    print("Late")
    if total_mins_difference <= 59:
        print(f"{mins_over} minutes after the start")
    else:
        print(f"{hours_over}:{mins_over:02d} hours after the start")
elif converted_mins_exam == converted_mins_arrival or 0 < (converted_mins_exam - converted_mins_arrival) <= 30:
    # Exactly on time, or early by at most 30 minutes (counts as on time).
    # An exact-zero difference prints only "On time".
    total_mins_difference = converted_mins_exam - converted_mins_arrival
    hours_under = total_mins_difference // 60
    mins_under = total_mins_difference % 60
    print("On time")
    if 0 < total_mins_difference <= 59:
        print(f"{mins_under} minutes before the start")
    elif total_mins_difference > 59:
        # NOTE(review): unreachable -- this branch requires a difference of
        # at most 30 minutes, so it can never exceed 59.
        print(f"{hours_under}:{mins_under:02d} hours before the start")
elif converted_mins_exam - converted_mins_arrival > 30:
    # Early by more than half an hour.
    total_mins_difference = converted_mins_exam - converted_mins_arrival
    hours_under = total_mins_difference // 60
    mins_under = total_mins_difference % 60
    print("Early")
    if 0 < total_mins_difference <= 59:
        print(f"{mins_under} minutes before the start")
    elif total_mins_difference > 59:
        print(f"{hours_under}:{mins_under:02d} hours before the start")
|
[
"noreply@github.com"
] |
antondelchev.noreply@github.com
|
a98ff93e061dffd4e07b14ff57b7b8f5c6ffe2fa
|
92aeff7cf4b42beac59131e6f7cef0f96a3ad12e
|
/pubmedpy/tests/test_names.py
|
a556dd7e62afc77f6ad3748119244132af686360
|
[
"BlueOak-1.0.0"
] |
permissive
|
ben-heil/pubmedpy
|
f6a6714ec9452e4a730e48cf1158e325c2c9ac99
|
9d716768f5ab798ec448154588e4fd99afd7584a
|
refs/heads/main
| 2023-04-03T04:16:42.852244
| 2021-04-09T14:22:10
| 2021-04-09T14:22:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,213
|
py
|
import pytest
from ..names import simplify_fore_name, simplify_last_name
# Cases cover whitespace trimming, initials ("AB"/"A.B.") being dropped in
# favor of the surname token, hyphenated names, two-letter edge cases, and
# empty/None inputs mapping to None.
@pytest.mark.parametrize(
    ("fore_name", "expected"),
    [
        (" Daniel ", "Daniel"),
        ("AB Chow", "Chow"),
        ("A.B. Chow", "Chow"),
        ("Mc-Winters", "Mc-Winters"),
        ("LE", None),
        ("Le", "Le"),
        (None, None),
        ("", None),
        (" ", None),
        ("-", None),
        ("-Rafeel!", "Rafeel"),
    ],
)
def test_simplify_fore_name(fore_name, expected):
    assert simplify_fore_name(fore_name) == expected
# Same function with lower=True: surviving names are lower-cased.
@pytest.mark.parametrize(
    ("fore_name", "expected"),
    [
        (" Daniel ", "daniel"),
        ("Mc-Winters", "mc-winters"),
        ("LE", None),
        ("", None),
        (" ", None),
        ("-", None),
    ],
)
def test_simplify_fore_name_lower(fore_name, expected):
    assert simplify_fore_name(fore_name, lower=True) == expected
# Last names keep their case; trailing punctuation/whitespace is stripped.
@pytest.mark.parametrize(
    ("last_name", "expected"),
    [
        (" Heavenstone .", "Heavenstone"),
        ("Heavenstone", "Heavenstone"),
        ("", None),
        (" ", None),
        (None, None),
    ],
)
def test_simplify_last_name(last_name, expected):
    assert simplify_last_name(last_name) == expected
|
[
"daniel.himmelstein@gmail.com"
] |
daniel.himmelstein@gmail.com
|
9c4fab802d9025afaed146b90a8ac64f54f6080b
|
da5bc6efaebc9ff015938d207b25c7804bc03b33
|
/12_file/ex03/ex03.py
|
e38c6d46c75404f638dff5a618058e8ca63df2df
|
[] |
no_license
|
codud0954/megait_python_20201116
|
b0f68f50a1e0d41c3c35535e718d5a236a7b1a98
|
a71f57d4332027406953599612cd014de2d26713
|
refs/heads/master
| 2023-01-31T11:14:27.611468
| 2020-12-18T09:03:11
| 2020-12-18T09:03:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 715
|
py
|
# CSV file write/read demo (comments translated to English).
# Create the CSV file -- one-time setup, kept commented for reference.
# f = open('member.csv', 'w', encoding='ms949')
# f.write("유재석,49,=\"01011112222\"\n")
# f.write("이효리,30,=\"01012346666\"\n")
# f.write("강호동,55,=\"01014563555\"\n")
# f.close()
# Open the CSV file (ms949 / cp949 encoding, per the writer above).
f = open('member.csv', 'r', encoding='ms949')
while True:
    line = f.readline()
    if not line:
        break
    line = line.replace("\n", "") # strip the newline
    line = line.replace("=", "") # drop the Excel-style '=' prefix
    line = line.replace('"', "") # drop the surrounding quotes
    #print(line)
    name, age, phone = line.split(',')
    print("이름은 %s이고 나이는 %d, 번호는 %s이다." % (name, int(age), phone))
f.close()
|
[
"noreply@github.com"
] |
codud0954.noreply@github.com
|
daf3148101435d5d8870c7b72995f4b085a2419a
|
70cc02f7ef2ce7e0985ff175f1947bd48452c6b9
|
/Leetcode/array/remove_duplicates_sorted_array2.py
|
6b0b76920227a4642663af30beb9783d80df90ad
|
[] |
no_license
|
Harshala-Gaikwad/Programming
|
9784c050d9a8d72afefe1836a5493b30194f0a2a
|
826a06499fbde4c2310fb9dad1a6fee84bc32f83
|
refs/heads/main
| 2023-07-04T20:22:27.753976
| 2021-08-19T17:34:23
| 2021-08-19T17:34:23
| 312,341,223
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 284
|
py
|
class Solution:
    def removeDuplicates(self, nums: List[int]) -> int:
        """Keep at most two copies of each value in ``nums``, in place.

        Returns the new length; ``nums`` is mutated so callers see the
        trimmed list (as in the original).  Replaces the original
        O(n^2) approach -- ``list.count`` on a growing list inside the
        loop -- with a single O(n) pass using a per-value counter.
        Relative order of the kept elements is unchanged for any input.
        """
        counts = {}  # value -> copies kept so far
        kept = []
        for value in nums:
            if counts.get(value, 0) < 2:
                kept.append(value)
                counts[value] = counts.get(value, 0) + 1
        nums[:] = kept  # mutate the caller's list, as the original did
        return len(nums)
|
[
"noreply@github.com"
] |
Harshala-Gaikwad.noreply@github.com
|
9d4d2b81cf1cca80ec29f799099cf8544f48d103
|
97080682a7cabb3fc770de47c97ee757575837d9
|
/modern_users/forms.py
|
993c3a4cb0a79cd1d010b74e74e9e1dd0b8b51f8
|
[] |
no_license
|
SlikNik/modern_village
|
fe77f210bb2b7eea27a149d7b78634937dfbd733
|
ca26e553caa0fe36758b228ca55229a8373c569e
|
refs/heads/master
| 2023-01-02T23:29:16.996471
| 2020-10-20T03:27:12
| 2020-10-20T03:27:12
| 301,758,821
| 2
| 0
| null | 2020-10-19T23:13:17
| 2020-10-06T14:42:43
|
Python
|
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
from django import forms
from modern_users.models import ModernUsers
class SignUpForm(forms.ModelForm):
password = forms.CharField(widget=forms.PasswordInput)
confirm_password=forms.CharField(widget=forms.PasswordInput())
class Meta:
model = ModernUsers
fields = ('first_name', 'last_name', 'email', 'address', 'city', 'zipcode', 'age', 'birthday', 'facebook', 'instagram', 'twitter', 'username', 'password', 'user_pic',)
def clean(self):
cleaned_data = super(SignUpForm, self).clean()
password = cleaned_data.get("password")
confirm_password = cleaned_data.get("confirm_password")
if password != confirm_password:
raise forms.ValidationError(
"password and confirm_password does not match"
)
class EditProfileForm(forms.ModelForm):
class Meta:
model = ModernUsers
fields = ('first_name', 'last_name', 'address', 'city', 'zipcode', 'age', 'birthday', 'facebook', 'instagram', 'twitter', 'user_pic',)
|
[
"nikalmorgan11@gmail.com"
] |
nikalmorgan11@gmail.com
|
faa36c8bcb8ff93361f4d6bcb847f541945fa7b1
|
1e71542814768fb570dadda69f3ceac57d6f7a35
|
/untitled/frame1.py
|
c2c176a928e83d39319469d7c56e9aadf5bd5cfd
|
[] |
no_license
|
apabhishek178/website_work
|
7a621bb5983e68c971db0ae2117c946a9484a8a7
|
97910ca2541ea612f715d2e19c5f659694941122
|
refs/heads/master
| 2021-07-21T07:52:21.901795
| 2017-10-29T05:59:35
| 2017-10-29T05:59:35
| 108,672,122
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 458
|
py
|
from tkinter import *
import tkinter
root=tkinter.Tk()
topframe=Frame(root)
topframe.pack()
bottomframe=Frame(root)
bottomframe.pack(side=RIGHT)
button1=Button(root,text="1",fg="red",font=28)
button2=Button(root,text="2",fg="green",font=26)
button3=Button(root,text="3",fg="blue",font=24)
button4=Button(bottomframe,text="4",fg="purple",font=22)
button1.pack(side=TOP)
button2.pack(side=LEFT)
button3.pack(side=LEFT)
button4.pack(side=LEFT)
root.mainloop()
|
[
"apabhishek178@gmail.com"
] |
apabhishek178@gmail.com
|
4ec5569bc9090ac9aaf0a82c8b87e18a9fdd11b6
|
a8d771af415aa5f0e23952f8599441f36f3c4733
|
/setup.py
|
c7fbeeacbee493d16d1cd876cf4ff8b36d5146e0
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
biomodels/MODEL1302010019
|
dda481b65b15cfbedcd70be79c182be4da292cea
|
4c746971883b61b7ba85937ee360b8148e9074ea
|
refs/heads/master
| 2021-01-22T05:01:13.045263
| 2014-10-16T05:58:26
| 2014-10-16T05:58:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 377
|
py
|
from setuptools import setup, find_packages
setup(name='MODEL1302010019',
version=20140916,
description='MODEL1302010019 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/MODEL1302010019',
maintainer='Stanley Gu',
maintainer_url='stanleygu@gmail.com',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
)
|
[
"stanleygu@gmail.com"
] |
stanleygu@gmail.com
|
4b17f4f3c8e4448fa2da50d78b68a1111cc8b288
|
1c74a2e075793e1d35c441518e2e138e14e26ea5
|
/DynamicProgramming/139. 单词拆分.py
|
c91f575176f9e720791ba0590cb1319d478959d0
|
[] |
no_license
|
Dawinia/LeetCode
|
1a385bfadbc4869c46dc1e9b8ca7656b77d746a0
|
e1dcc71ca657b42eb8eb15116697e852ef4a475a
|
refs/heads/master
| 2021-07-20T00:56:01.058471
| 2020-07-22T14:07:04
| 2020-07-22T14:07:04
| 197,305,126
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 409
|
py
|
class Solution:
def wordBreak(self, s: str, wordDict: List[str]) -> bool:
if not s: return False
dp = [False for _ in range(len(s) + 1)]
dp[0] = True
wordDict = set(wordDict)
for i in range(len(s) + 1):
for j in range(i):
if dp[j] and s[j: i] in wordDict:
dp[i] = True
break
return dp[-1]
|
[
"dawinialo@163.com"
] |
dawinialo@163.com
|
96aa05d3ed0dc1b7ff4de70729ea4aee4bb5d044
|
5063faf298a36466cdb90f1cbd0a4f4e855b5d3b
|
/test/test_projects_api.py
|
0f0c3cd8d6d5647894b277fdf3303c40f5c0006d
|
[] |
no_license
|
pollination/python-sdk
|
d4eb4efbcbe3a76cc170cf8e71ad5bc6ca6c3011
|
e4a94b236534658b150961795256224fe8dd93c2
|
refs/heads/master
| 2023-08-04T15:19:29.126613
| 2022-03-06T10:43:21
| 2022-03-06T10:51:08
| 224,588,062
| 3
| 1
| null | 2023-09-05T20:52:35
| 2019-11-28T06:48:40
|
Python
|
UTF-8
|
Python
| false
| false
| 2,637
|
py
|
# coding: utf-8
"""
pollination-server
Pollination Server OpenAPI Definition # noqa: E501
The version of the OpenAPI document: 0.27.0
Contact: info@pollination.cloud
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import pollination_sdk
from pollination_sdk.api.projects_api import ProjectsApi # noqa: E501
from pollination_sdk.rest import ApiException
class TestProjectsApi(unittest.TestCase):
"""ProjectsApi unit test stubs"""
def setUp(self):
self.api = pollination_sdk.api.projects_api.ProjectsApi() # noqa: E501
def tearDown(self):
pass
def test_create_project(self):
"""Test case for create_project
Create a Project # noqa: E501
"""
pass
def test_create_project_recipe_filter(self):
"""Test case for create_project_recipe_filter
Upsert a recipe filter to a project # noqa: E501
"""
pass
def test_delete_project(self):
"""Test case for delete_project
Delete a Project # noqa: E501
"""
pass
def test_delete_project_org_permission(self):
"""Test case for delete_project_org_permission
Remove a Project permissions # noqa: E501
"""
pass
def test_delete_project_recipe_filter(self):
"""Test case for delete_project_recipe_filter
Remove a Project recipe filter # noqa: E501
"""
pass
def test_get_project(self):
"""Test case for get_project
Get a project # noqa: E501
"""
pass
def test_get_project_access_permissions(self):
"""Test case for get_project_access_permissions
Get project access permissions # noqa: E501
"""
pass
def test_get_project_recipe_filters(self):
"""Test case for get_project_recipe_filters
Get project recipe filters # noqa: E501
"""
pass
def test_get_project_recipes(self):
"""Test case for get_project_recipes
Get project recipes # noqa: E501
"""
pass
def test_list_projects(self):
"""Test case for list_projects
List Projects # noqa: E501
"""
pass
def test_update(self):
"""Test case for update
Update a Project # noqa: E501
"""
pass
def test_upsert_project_permission(self):
"""Test case for upsert_project_permission
Upsert a new permission to a project # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"antoinedao1@gmail.com"
] |
antoinedao1@gmail.com
|
542f651000d9847a3a1e8b6fd63bd0714affc2da
|
551b75f52d28c0b5c8944d808a361470e2602654
|
/huaweicloud-sdk-kms/huaweicloudsdkkms/v1/model/api_version_detail.py
|
2fb3fceb4eb332ac3df439c22622271584e00bd6
|
[
"Apache-2.0"
] |
permissive
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
9d6597ce8ab666a9a297b3d936aeb85c55cf5877
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
refs/heads/master
| 2023-05-08T21:32:31.920300
| 2021-05-26T08:54:18
| 2021-05-26T08:54:18
| 370,898,764
| 0
| 0
|
NOASSERTION
| 2021-05-26T03:50:07
| 2021-05-26T03:50:07
| null |
UTF-8
|
Python
| false
| false
| 7,101
|
py
|
# coding: utf-8
import pprint
import re
import six
class ApiVersionDetail:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'id': 'str',
'links': 'list[ApiLink]',
'version': 'str',
'status': 'str',
'updated': 'str',
'min_version': 'str'
}
attribute_map = {
'id': 'id',
'links': 'links',
'version': 'version',
'status': 'status',
'updated': 'updated',
'min_version': 'min_version'
}
def __init__(self, id=None, links=None, version=None, status=None, updated=None, min_version=None):
"""ApiVersionDetail - a model defined in huaweicloud sdk"""
self._id = None
self._links = None
self._version = None
self._status = None
self._updated = None
self._min_version = None
self.discriminator = None
if id is not None:
self.id = id
if links is not None:
self.links = links
if version is not None:
self.version = version
if status is not None:
self.status = status
if updated is not None:
self.updated = updated
if min_version is not None:
self.min_version = min_version
@property
def id(self):
"""Gets the id of this ApiVersionDetail.
版本ID(版本号),如“v1.0”。
:return: The id of this ApiVersionDetail.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ApiVersionDetail.
版本ID(版本号),如“v1.0”。
:param id: The id of this ApiVersionDetail.
:type: str
"""
self._id = id
@property
def links(self):
"""Gets the links of this ApiVersionDetail.
JSON对象,详情请参见links字段数据结构说明。
:return: The links of this ApiVersionDetail.
:rtype: list[ApiLink]
"""
return self._links
@links.setter
def links(self, links):
"""Sets the links of this ApiVersionDetail.
JSON对象,详情请参见links字段数据结构说明。
:param links: The links of this ApiVersionDetail.
:type: list[ApiLink]
"""
self._links = links
@property
def version(self):
"""Gets the version of this ApiVersionDetail.
若该版本API支持微版本,则填支持的最大微版本号,如果不支持微版本,则返回空字符串。
:return: The version of this ApiVersionDetail.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this ApiVersionDetail.
若该版本API支持微版本,则填支持的最大微版本号,如果不支持微版本,则返回空字符串。
:param version: The version of this ApiVersionDetail.
:type: str
"""
self._version = version
@property
def status(self):
"""Gets the status of this ApiVersionDetail.
版本状态,包含如下3种: - CURRENT:表示该版本为主推版本。 - SUPPORTED:表示为老版本,但是现在还继续支持。 - DEPRECATED:表示为废弃版本,存在后续删除的可能。
:return: The status of this ApiVersionDetail.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ApiVersionDetail.
版本状态,包含如下3种: - CURRENT:表示该版本为主推版本。 - SUPPORTED:表示为老版本,但是现在还继续支持。 - DEPRECATED:表示为废弃版本,存在后续删除的可能。
:param status: The status of this ApiVersionDetail.
:type: str
"""
self._status = status
@property
def updated(self):
"""Gets the updated of this ApiVersionDetail.
版本发布时间,要求用UTC时间表示。如v1.发布的时间2014-06-28T12:20:21Z。
:return: The updated of this ApiVersionDetail.
:rtype: str
"""
return self._updated
@updated.setter
def updated(self, updated):
"""Sets the updated of this ApiVersionDetail.
版本发布时间,要求用UTC时间表示。如v1.发布的时间2014-06-28T12:20:21Z。
:param updated: The updated of this ApiVersionDetail.
:type: str
"""
self._updated = updated
@property
def min_version(self):
"""Gets the min_version of this ApiVersionDetail.
若该版本API 支持微版本,则填支持的最小微版本号,如果不支持微版本,则返回空字符串。
:return: The min_version of this ApiVersionDetail.
:rtype: str
"""
return self._min_version
@min_version.setter
def min_version(self, min_version):
"""Sets the min_version of this ApiVersionDetail.
若该版本API 支持微版本,则填支持的最小微版本号,如果不支持微版本,则返回空字符串。
:param min_version: The min_version of this ApiVersionDetail.
:type: str
"""
self._min_version = min_version
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ApiVersionDetail):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
e8b10831544dd015ee2d3f1a85a00b24b27c14ec
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r8/Gen/DecFiles/options/25113000.py
|
00acb6e700d0a179d725b0d6740b0388b780f7d4
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 756
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r8/Gen/DecFiles/options/25113000.py generated: Fri, 27 Mar 2015 15:48:05
#
# Event Type: 25113000
#
# ASCII decay Descriptor: [Lambda_c+ -> p+ mu- mu+]cc
#
from Configurables import Generation
Generation().EventType = 25113000
Generation().SampleGenerationTool = "SignalPlain"
from Configurables import SignalPlain
Generation().addTool( SignalPlain )
Generation().SignalPlain.ProductionTool = "PythiaProduction"
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/Lc_pmumu=OS,DecProdCut.dec"
Generation().SignalPlain.CutTool = "DaughtersInLHCb"
Generation().SignalPlain.SignalPIDList = [ 4122,-4122 ]
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
83f6ae6e2ca3950d4b58b8575c1d4bbdaa0baa58
|
ec931947aa3e06ce565637e7ee1cb707f56375a2
|
/aoc2015/day24/day24.py
|
d4455fd57993b221ad974a8240dfe528ac07f407
|
[] |
no_license
|
chrisglencross/advent-of-code
|
5f16ed7e2265d27ce15f502ce2a1c2f11fc99fc0
|
21623d4aa01a9e20285a0233c50f8f56c4099af5
|
refs/heads/master
| 2023-01-24T22:01:30.829679
| 2023-01-12T23:03:03
| 2023-01-12T23:03:03
| 224,833,969
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,031
|
py
|
#!/usr/bin/python3
# Advent of code 2015 day 24
# See https://adventofcode.com/2015/day/24
import functools
import itertools
def quantum_entanglement(packages):
return functools.reduce(lambda x, y: x * y, packages)
def get_passenger_compartment_quantum_entanglement(packages, compartments):
total_weight = sum(packages)
compartment_weight = total_weight // compartments
candidates = []
for i in range(0, len(packages)):
if candidates:
break
for group in itertools.combinations(packages, i):
if sum(group) == compartment_weight:
candidates.append(group)
passenger_compartment_packages = min(candidates, key=quantum_entanglement)
return quantum_entanglement(passenger_compartment_packages)
with open("input.txt") as f:
all_packages = [int(line) for line in f.readlines()]
print("Part 1:", get_passenger_compartment_quantum_entanglement(all_packages, 3))
print("Part 2:", get_passenger_compartment_quantum_entanglement(all_packages, 4))
|
[
"chris@glencross.org"
] |
chris@glencross.org
|
cd87b660cb646404a973d34f0eac4fd6a9701f37
|
183d51188e6aaf077023a7ab2f9e35a681707e4e
|
/Well_of_Mimir/local/bin/pilprint.py
|
93c7698b0101acd9e5fe33fcfd2e63812d92026f
|
[] |
no_license
|
hanwei2008/Virtual_Environment
|
6541d8dd608d620f76fcbc84f1c5bf2581a3b49e
|
5df207171d27333d3f7cf45447a558f4f97e1c10
|
refs/heads/master
| 2016-09-14T07:11:57.086452
| 2016-04-22T01:21:43
| 2016-04-22T01:21:43
| 56,732,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,421
|
py
|
#!/home/caiyuantao/Virtual_Environment/Well_of_Mimir/bin/python
#
# The Python Imaging Library.
# $Id$
#
# print image files to postscript printer
#
# History:
# 0.1 1996-04-20 fl Created
# 0.2 1996-10-04 fl Use draft mode when converting.
# 0.3 2003-05-06 fl Fixed a typo or two.
#
from __future__ import print_function
VERSION = "pilprint 0.3/2003-05-05"
from PIL import Image
from PIL import PSDraw
letter = ( 1.0*72, 1.0*72, 7.5*72, 10.0*72 )
def description(file, image):
import os
title = os.path.splitext(os.path.split(file)[1])[0]
format = " (%dx%d "
if image.format:
format = " (" + image.format + " %dx%d "
return title + format % image.size + image.mode + ")"
import getopt, os, sys
if len(sys.argv) == 1:
print("PIL Print 0.2a1/96-10-04 -- print image files")
print("Usage: pilprint files...")
print("Options:")
print(" -c colour printer (default is monochrome)")
print(" -p print via lpr (default is stdout)")
print(" -P <printer> same as -p but use given printer")
sys.exit(1)
try:
opt, argv = getopt.getopt(sys.argv[1:], "cdpP:")
except getopt.error as v:
print(v)
sys.exit(1)
printer = None # print to stdout
monochrome = 1 # reduce file size for most common case
for o, a in opt:
if o == "-d":
# debug: show available drivers
Image.init()
print(Image.ID)
sys.exit(1)
elif o == "-c":
# colour printer
monochrome = 0
elif o == "-p":
# default printer channel
printer = "lpr"
elif o == "-P":
# printer channel
printer = "lpr -P%s" % a
for file in argv:
try:
im = Image.open(file)
title = description(file, im)
if monochrome and im.mode not in ["1", "L"]:
im.draft("L", im.size)
im = im.convert("L")
if printer:
fp = os.popen(printer, "w")
else:
fp = sys.stdout
ps = PSDraw.PSDraw(fp)
ps.begin_document()
ps.setfont("Helvetica-Narrow-Bold", 18)
ps.text((letter[0], letter[3]+24), title)
ps.setfont("Helvetica-Narrow-Bold", 8)
ps.text((letter[0], letter[1]-30), VERSION)
ps.image(letter, im)
ps.end_document()
except:
print("cannot print image", end=' ')
print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
|
[
"hanwei2008123@163.com"
] |
hanwei2008123@163.com
|
8fcd5967724a920ecab4cddcb4f9f250368c769d
|
cff311c1c13ebcca75aedc26256d90dd9b1b6784
|
/CMSSW_tools/scripts/modules/SaveObjectsToFile.py
|
52ce8dc5302147e1b1ba792aac933576771e589e
|
[] |
no_license
|
soarnsoar/Gen_validation
|
a892f021716fc47254b24f27ee6a2b46fa206601
|
0c0c043a32ce282fb9fdd32d30a421c81c070072
|
refs/heads/master
| 2021-08-20T08:19:58.389059
| 2020-05-20T08:09:48
| 2020-05-20T08:09:48
| 184,736,378
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 370
|
py
|
'''
void SaveObjectToFile(TString filepath, TObject *a){
TFile outputFile (filepath,"RECREATE");
a->Write();
outputFile.Write();
outputFile.Close();
}
'''
import ROOT
def SaveObjectsToFile(filepath, object_list):
outputFile=ROOT.TFile(filepath,'RECREATE')
for obj in object_list:
obj.Write()
outputFile.Write()
outputFile.Close()
|
[
"soarnsoar@gmail.com"
] |
soarnsoar@gmail.com
|
01b469ea944a60bb9607500e6a98fe4390599d52
|
df30f97d316e899b07b223bc86cfe53345627f06
|
/problems/test9/2.py
|
9b5d2ab1827366945381d2f24f0e904c29a9ec9b
|
[] |
no_license
|
GH-Lim/AlgorithmPractice
|
c6a3aa99fa639aa23d685ae14c1754e0605eaa98
|
e7b8de2075348fb9fcc34c1d7f211fdea3a4deb0
|
refs/heads/master
| 2021-06-18T17:21:10.923380
| 2021-04-18T03:43:26
| 2021-04-18T03:43:26
| 199,591,747
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,373
|
py
|
from heapq import heappop, heappush
def calc_day(month, day):
res = 0
for i in range(month):
if i in {1, 3, 5, 7, 8, 10, 12}:
res += 31
elif i in {4, 6, 9, 11}:
res += 30
else:
res += 28
return res + day
def solution(n, customers):
times = []
rest = []
work = []
cnt = [0] * (n + 1)
for i in range(1, n + 1):
heappush(rest, (0, i))
for customer in customers:
date, time, minute = customer.split()
MM, DD = map(int, date.split('/'))
hh, mm, ss = map(int, time.split(':'))
d = calc_day(MM, DD)
arrival = d * 24 * 3600 + hh * 3600 + mm * 60 + ss
if times and times[-1][0] > arrival:
arrival += 365 * 24 * 3600
times.append((arrival, int(minute) * 60))
for arr_time in times:
t, m = arr_time
while work:
fin_time, num = heappop(work)
if t >= fin_time:
heappush(rest, (fin_time, num))
else:
heappush(work, (fin_time, num))
break
if rest:
_, num = heappop(rest)
cnt[num] += 1
heappush(work, (t + m, num))
else:
fin_time, num = heappop(work)
cnt[num] += 1
heappush(work, (fin_time + m, num))
return max(cnt)
|
[
"gunhyuck11@gmail.com"
] |
gunhyuck11@gmail.com
|
6b5ab4b2754b36920528896f2aaa8f093602b0f2
|
e3376c04ecca6eaf0186f8a38eef245e03ddbe92
|
/Modulo1/04_Cuarta_semana.py/Funciones de Orden Superior/Funcion_orden_superior_02_Ejemplo.py
|
1539a70d7c21e30a3055dd4840705c6279583b55
|
[] |
no_license
|
lamorenove/Ejercicios-Python
|
d0b31810ba5d0e6d4ab922b5e325ed76cc57e324
|
ee26d1dd30db8c7e088a668e11dc635b779d933a
|
refs/heads/master
| 2023-06-24T05:46:33.383620
| 2021-07-22T17:20:23
| 2021-07-22T17:20:23
| 385,313,864
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 698
|
py
|
def crear_funcion(operador):
if operador == "-":
def resta(valor1=0, valor2=0):
return valor1 - valor2
return resta
elif operador == "*":
def multiplicacion(valor1=0, valor2=0):
return valor1 * valor2
return multiplicacion
elif operador == "/":
def division(valor1=0, valor2=0):
return valor1 / valor2
return division
funcion_resta = crear_funcion("-")
resultado = operacion(funcion_resta, 30, 10)
funcion_multiplicacion = crear_funcion("*")
resultado = operacion(funcion_multiplicacion, 30, 10)
funcion_division = crear_funcion("/")
resultado = operacion(funcion_division, 30, 10)
print(resultado)
|
[
"lamorenove@gmail.com"
] |
lamorenove@gmail.com
|
d8a88aace4b211c001c0a664802ffaaf72121697
|
dcd840c1ef56db1cd4aa1ca170ab374d3a4c10b6
|
/src/programy/clients/args.py
|
afa29cceefbe78854d412a5590ff0ba529b67c39
|
[
"MIT"
] |
permissive
|
zippyy/program-y
|
e1c9ce3be6cbbba2853842999c2277d574755eb3
|
9267a3dfcbb10ea109b187dbb3767d61ca4da841
|
refs/heads/master
| 2020-04-02T17:52:34.382681
| 2018-10-24T16:28:50
| 2018-10-24T16:28:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,264
|
py
|
"""
Copyright (c) 2016-2018 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import logging
import argparse
class ClientArguments(object):
def __init__(self, client, parser=None):
self._bot_root = "."
self._logging = logging.DEBUG
self._config_name = "config.yaml"
self._config_format = "yaml"
self._no_loop = False
def parse_args(self, client):
pass
@property
def bot_root(self):
return self._bot_root
@bot_root.setter
def bot_root(self, root):
self._bot_root = root
@property
def logging(self):
return self._logging
@property
def config_filename(self):
return self._config_name
@property
def config_format(self):
return self._config_format
@property
def noloop(self):
return self._no_loop
class CommandLineClientArguments(ClientArguments):
def __init__(self, client, parser=None):
self.args = None
self._bot_root = None
self._logging = None
self._config_name = None
self._config_format = None
self._no_loop = False
ClientArguments.__init__(self, client)
if parser is None:
self.parser = argparse.ArgumentParser()
else:
self.parser = parser
self.parser.add_argument('--bot_root', dest='bot_root', help='root folder for all bot configuration data')
self.parser.add_argument('--config', dest='config', help='configuration file location')
self.parser.add_argument('--cformat', dest='cformat', help='configuration file format (yaml|json|ini)')
self.parser.add_argument('--logging', dest='logging', help='logging configuration file')
self.parser.add_argument('--noloop', dest='noloop', action='store_true', help='do not enter conversation loop')
client.add_client_arguments(self.parser)
def parse_args(self, client):
self.args = self.parser.parse_args()
self._bot_root = self.args.bot_root
self._logging = self.args.logging
self._config_name = self.args.config
self._config_format = self.args.cformat
self._no_loop = self.args.noloop
client.parse_args(self, self.args)
|
[
"keith@keithsterling.com"
] |
keith@keithsterling.com
|
ac88203f03fb256732a6ddee77bdc257f143d26a
|
a0d2a90c21ff3e05e0fd939698a6dfb7e54d16d9
|
/GServer/mac_cmd/debuger.py
|
b571a240ddf3f8d694124c7a0afd34b03cc65be0
|
[
"MIT"
] |
permissive
|
soybean217/lora-python
|
4a72407607d2201a91b5e0a7dcd115d7788b7e65
|
9c4324f81bae8b20f6c353447189f724a5cf54c6
|
refs/heads/master
| 2022-12-13T08:24:13.267783
| 2017-12-06T08:20:40
| 2017-12-06T08:20:40
| 102,331,484
| 0
| 0
|
MIT
| 2022-12-07T23:56:50
| 2017-09-04T07:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 718
|
py
|
from time import ctime,time
def timeStumpFunc(args):
def get_function(function):
def wrappedFunc(*nkw):
time_start = time()*1000
result = function(*nkw)
time_casted = time()*1000 - time_start
print('Function', args, 'cast %f ms' % time_casted)
return result
return wrappedFunc
return get_function
def debuger(args):
def get_function(function):
def wrapped_function(*nkw):
print(args, 'begin!')
# print('input type:',type(*nkw),'len:',len(*nkw))
result = function(*nkw)
print(args, 'done!')
return result
return wrapped_function
return get_function
|
[
"13565644@qq.com"
] |
13565644@qq.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.