blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
03f970ba4bcdee92ff5b8b31f973424f87b081b6
|
2efd0540d7b05d1e56b625a92172b6aac0c9a48e
|
/Copa/venv/Scripts/pip3.6-script.py
|
a8aa2e13aee23f62ade69cbd0b144de241c157b8
|
[] |
no_license
|
EricKurachi/algoritmo_genetico
|
f40003381fc0baba91db2e42437e59f33bb0ae5c
|
46659dbfcdd6a93f66f2e8dff4c920d6300c1c41
|
refs/heads/master
| 2021-10-09T08:13:09.647460
| 2018-12-23T23:46:35
| 2018-12-23T23:46:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 404
|
py
|
#!C:\Users\Eric\Python\Copa\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3.6'
# Auto-generated setuptools launcher: resolves the 'pip3.6' console-script
# entry point from the pinned pip==10.0.1 distribution and runs it.
# Do not edit by hand; it is regenerated on (re)install.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip the '-script.py'/'-script.pyw'/'.exe' launcher suffix so the
    # invoked tool sees a clean program name in argv[0].
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3.6')()
    )
|
[
"erictkurachi@gmail.com"
] |
erictkurachi@gmail.com
|
847e269f9a9c6a38b6d1af2ea9b074571cd66b64
|
a425842a51deab915fc4319b3226cef3f49e53ea
|
/build/extriPACK/intelligent_actuator/robo_cylinder/catkin_generated/pkg.installspace.context.pc.py
|
25b485d848699b5198e6532286e6957b091976b9
|
[] |
no_license
|
Sinchiguano/Part-Localization-For-Robotic-Arm
|
1458204e52f34354cbd0e8e1bff1dfaf6caefe1c
|
ebc1ed19da171ff4b5a52a3a031ae3049b0b9eb8
|
refs/heads/master
| 2021-10-08T19:49:53.455680
| 2018-12-16T20:03:04
| 2018-12-16T20:03:04
| 155,774,219
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the 'robo_cylinder' catkin package
# (install space). Do not edit by hand; regenerate via catkin.
CATKIN_PACKAGE_PREFIX = ""
# Include dirs / libraries are ';'-separated in the template; empty strings
# must map to empty lists, hence the conditional split.
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/casch/yumi_ws/install/include".split(';') if "/home/casch/yumi_ws/install/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rospy;std_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "robo_cylinder"
PROJECT_SPACE_DIR = "/home/casch/yumi_ws/install"
PROJECT_VERSION = "0.0.0"
|
[
"cesarsinchiguano@hotmail.es"
] |
cesarsinchiguano@hotmail.es
|
d2211d3eaf542db3587fdce683a7c3b3881827a9
|
bcf98d9adf6f0e44601d91c83453b01ad311071d
|
/listings/models.py
|
6ccf1ecd4cff2f0209fb8a27b2f7a0ceed3d7559
|
[] |
no_license
|
GMNaim/Real-Estate-project
|
89fa64cb5afeb344d43095da93bbcbf35b6b9121
|
93286edde1e5d34dcbbf3ca1695a5ef4e3577f64
|
refs/heads/master
| 2022-02-13T06:04:34.202090
| 2019-08-28T14:33:08
| 2019-08-28T14:33:08
| 192,858,474
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,610
|
py
|
from django.db import models
from datetime import datetime
from realtors.models import RealtorInformation
class IndividualListInformation(models.Model):
    """A single real-estate listing published by a realtor."""
    # DO_NOTHING: deleting a realtor does not cascade to listings;
    # referential integrity is left to the database layer.
    realtor = models.ForeignKey(RealtorInformation, on_delete=models.DO_NOTHING)
    title = models.CharField(max_length=250)
    house_address = models.CharField(max_length=100, blank=True)
    state = models.CharField(max_length=100)
    zip_code = models.CharField(max_length=50)
    city = models.CharField(max_length=100)
    country = models.CharField(max_length=100)
    description = models.TextField(blank=True)
    price = models.IntegerField()
    bedrooms = models.IntegerField()
    # Decimal with one decimal place allows half-baths (e.g. 2.5).
    bathrooms = models.DecimalField(max_digits=4, decimal_places=1)
    garage = models.IntegerField(default=0)
    square_feet = models.IntegerField()
    lot_size = models.DecimalField(max_digits=5, decimal_places=1)
    # Uploads are sharded into date-based directories (photos/YYYY/MM/DD/).
    # Only the main photo is required; photo_1..photo_6 are optional.
    photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
    photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    is_published = models.BooleanField(default=True)
    # NOTE(review): default=datetime.now is a callable, so it is evaluated per
    # save, but yields naive local time; django.utils.timezone.now may be
    # intended if USE_TZ is on — confirm against project settings.
    list_date = models.DateTimeField(default=datetime.now, blank=True)
    def __str__(self):  # Main field to display
        return self.title  # Here title is the main field
|
[
"gmnaim3336@gmail.com"
] |
gmnaim3336@gmail.com
|
8841420544c5a92b4429c203c368ca8f123180ab
|
32e55bf28b9f22265bcbc1d8c0ebf52a3608187d
|
/12. Integer to Roman.py
|
03281264063156d7c93642f5364115012353c373
|
[] |
no_license
|
Garacc/LeetCode
|
9f843672a18701d032f36769c9025761199d8caf
|
215d12703b2cac4c1ad49d5a0e1060948fbbacd2
|
refs/heads/master
| 2018-10-10T03:37:48.889898
| 2018-09-17T08:38:22
| 2018-09-17T08:38:22
| 120,304,202
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 992
|
py
|
class Solution:
    def intToRoman(self, num):
        """
        Convert an integer (expected range 1..3999) to its Roman numeral.

        :type num: int
        :rtype: str
        """
        # Value/symbol pairs in descending order. The 900/400/90/40/9/4
        # entries encode the subtractive forms (CM, CD, XC, XL, IX, IV),
        # so a single greedy pass produces the canonical numeral and
        # replaces the original per-digit if/elif ladders.
        pairs = (
            (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),
            (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),
            (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'), (1, 'I'),
        )
        parts = []
        for value, symbol in pairs:
            count, num = divmod(num, value)
            parts.append(symbol * count)
        return ''.join(parts)
|
[
"xgyxmxl@163.com"
] |
xgyxmxl@163.com
|
49b096d2e37b444b6167aab4b4c3ee32ff9c8f02
|
21e76f93747336bb649ec1906257b0dee66442d3
|
/resources/lib/services/nfsession/nfsession_requests.py
|
648a46b52e40073504ca231fd3706fe5f7f3ffd9
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
freedomhkg-tv/plugin.video.netflix
|
d55f1671b4fbf201e3cda34e6eea4347c3935ee6
|
30ac436ffd02389983df8610aee098eb0bc10b0c
|
refs/heads/master
| 2021-01-02T17:39:28.005728
| 2020-02-09T19:50:18
| 2020-02-09T19:50:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,480
|
py
|
# -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Copyright (C) 2019 Stefano Gottardo - @CastagnaIT
Stateful Netflix session management: handle the http requests
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
from __future__ import absolute_import, division, unicode_literals
import time
import json
import requests
import resources.lib.common as common
import resources.lib.api.website as website
from resources.lib.globals import g
from resources.lib.services.nfsession.nfsession_base import NFSessionBase, needs_login
from resources.lib.database.db_utils import TABLE_SESSION
from resources.lib.api.exceptions import (APIError, WebsiteParsingError,
InvalidMembershipStatusError)
BASE_URL = 'https://www.netflix.com'
"""str: Secure Netflix url"""
# Map of logical component name -> endpoint path plus a flag saying whether
# the call goes through the dynamic API base (_api_url) or is fetched as a
# plain HTML document from BASE_URL (_document_url).
URLS = {
    'login': {'endpoint': '/login', 'is_api_call': False},
    'logout': {'endpoint': '/SignOut', 'is_api_call': False},
    'shakti': {'endpoint': '/pathEvaluator', 'is_api_call': True},
    'browse': {'endpoint': '/browse', 'is_api_call': False},
    'profiles': {'endpoint': '/profiles/manage', 'is_api_call': False},
    'switch_profile': {'endpoint': '/SwitchProfile', 'is_api_call': False},
    'activate_profile': {'endpoint': '/profiles/switch', 'is_api_call': True},
    'pin': {'endpoint': '/pin', 'is_api_call': False},
    'pin_reset': {'endpoint': '/pin/reset', 'is_api_call': True},
    'pin_service': {'endpoint': '/pin/service', 'is_api_call': True},
    'metadata': {'endpoint': '/metadata', 'is_api_call': True},
    'set_video_rating': {'endpoint': '/setVideoRating', 'is_api_call': True},  # Old rating system
    'set_thumb_rating': {'endpoint': '/setThumbRating', 'is_api_call': True},
    'update_my_list': {'endpoint': '/playlistop', 'is_api_call': True},
    # Don't know what these could be used for. Keeping for reference
    # 'video_list_ids': {'endpoint': '/preflight', 'is_api_call': True},
    # 'kids': {'endpoint': '/Kids', 'is_api_call': False}
}
# List of all static endpoints for HTML/JSON POST/GET requests
# How many entries of a list will be fetched with one path request
class NFSessionRequests(NFSessionBase):
    """Handle the http requests"""
    @common.addonsignals_return_call
    @needs_login
    def get(self, component, **kwargs):
        """Execute a GET request to the designated component's URL."""
        return self._get(component, **kwargs)
    @common.addonsignals_return_call
    @needs_login
    def post(self, component, **kwargs):
        """Execute a POST request to the designated component's URL."""
        return self._post(component, **kwargs)
    def _get(self, component, **kwargs):
        # Internal variant without the addon-signal / login decorators.
        return self._request_call(
            method=self.session.get,
            component=component,
            **kwargs)
    def _post(self, component, **kwargs):
        return self._request_call(
            method=self.session.post,
            component=component,
            **kwargs)
    @common.time_execution(immediate=True)
    def _request_call(self, method, component, **kwargs):
        # session_refreshed=None on the first attempt; _request flips it to
        # True after one refresh so the retry happens at most once.
        return self._request(method, component, None, **kwargs)
    def _request(self, method, component, session_refreshed, **kwargs):
        """Perform the request for `component`, retrying once after a session refresh on 404/401."""
        url = (_api_url(component)
               if URLS[component]['is_api_call']
               else _document_url(component))
        common.debug('Executing {verb} request to {url}',
                     verb='GET' if method == self.session.get else 'POST', url=url)
        data, headers, params = self._prepare_request_properties(component,
                                                                 kwargs)
        # NOTE(review): time.clock() was removed in Python 3.8; this only runs
        # if the interpreter is <=3.7 (file targets py2/3 via __future__) —
        # time.perf_counter() would be the portable replacement.
        start = time.clock()
        response = method(
            url=url,
            verify=self.verify_ssl,
            headers=headers,
            params=params,
            data=data)
        common.debug('Request took {}s', time.clock() - start)
        common.debug('Request returned statuscode {}', response.status_code)
        if response.status_code in [404, 401] and not session_refreshed:
            # 404 - It may happen when Netflix update the build_identifier version and causes the api address to change
            # 401 - It may happen when authURL is not more valid (Unauthorized for url)
            # So let's try refreshing the session data (just once)
            common.warn('Try refresh session data due to {} http error', response.status_code)
            if self.try_refresh_session_data():
                return self._request(method, component, True, **kwargs)
        response.raise_for_status()
        # API calls return decoded JSON (after error-status check);
        # document calls return the raw body bytes.
        return (_raise_api_error(response.json() if response.content else {})
                if URLS[component]['is_api_call']
                else response.content)
    def try_refresh_session_data(self, raise_exception=False):
        """Refresh session_data from the Netflix website"""
        # pylint: disable=broad-except
        try:
            website.extract_session_data(self._get('profiles'))
            self.update_session_data()
            common.debug('Successfully refreshed session data')
            return True
        except InvalidMembershipStatusError:
            # Membership problems must always surface to the caller.
            raise
        except WebsiteParsingError:
            # it is possible that cookies may not work anymore,
            # it should be due to updates in the website,
            # this can happen when opening the addon while executing update_profiles_data
            import traceback
            common.warn('Failed to refresh session data, login expired (WebsiteParsingError)')
            common.debug(traceback.format_exc())
            self.session.cookies.clear()
            return self._login()
        except requests.exceptions.RequestException:
            import traceback
            common.warn('Failed to refresh session data, request error (RequestException)')
            common.warn(traceback.format_exc())
            if raise_exception:
                raise
        except Exception:
            import traceback
            common.warn('Failed to refresh session data, login expired (Exception)')
            common.debug(traceback.format_exc())
            self.session.cookies.clear()
            if raise_exception:
                raise
        return False
    def _login(self, modal_error_message=False):
        # Implemented by a subclass; this base never logs in itself.
        raise NotImplementedError
    def _prepare_request_properties(self, component, kwargs):
        """Extract data/headers/params from kwargs, JSON-encoding and adding authURL for write endpoints."""
        data = kwargs.get('data', {})
        headers = kwargs.get('headers', {})
        params = kwargs.get('params', {})
        if component in ['set_video_rating', 'set_thumb_rating', 'update_my_list', 'pin_service']:
            headers.update({
                'Content-Type': 'application/json',
                'Accept': 'application/json, text/javascript, */*'})
            data['authURL'] = self.auth_url
            data = json.dumps(data)
        return data, headers, params
def _document_url(component):
    """Build the absolute URL for a plain-document (non-API) endpoint."""
    endpoint = URLS[component]['endpoint']
    return '{}{}'.format(BASE_URL, endpoint)
def _api_url(component):
    """Build the absolute URL for an API endpoint from the stored API base."""
    base = g.LOCAL_DB.get_value('api_endpoint_url', table=TABLE_SESSION)
    return base + URLS[component]['endpoint']
def _raise_api_error(decoded_response):
if decoded_response.get('status', 'success') == 'error':
raise APIError(decoded_response.get('message'))
return decoded_response
|
[
"gottardo.stefano.83@gmail.com"
] |
gottardo.stefano.83@gmail.com
|
d702030522318f0d1aa5c9c134e670bf2dd23db5
|
10f091bf946bdd6b50c3fa0637504ab19d9c65c2
|
/albums/3/challenge41_easy/code.py
|
ad0dfe1fae293f17ce7bc56dcbb430c51c67f770
|
[] |
no_license
|
Rich43/rog
|
ccebee00b982579c46c30a7dab55b4dbe6396fdc
|
029dd57c920aa869750b809d22092c9614e67ba9
|
refs/heads/master
| 2023-01-23T07:07:16.069821
| 2023-01-19T19:10:43
| 2023-01-19T19:10:43
| 109,163,883
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 967
|
py
|
''' Write a program that will accept a sentence as input and then output that sentence surrounded by some type of an ASCII decoratoin banner.
Sample run:
Enter a sentence: So long and thanks for all the fish
Output
*****************************************
* *
* So long and thanks for all the fish *
* *
*****************************************
Bonus: If the sentence is too long, move words to the next line.
'''
def outer(length=None):
    """Top/bottom border row: text width plus 2 spaces and 1 'x' per side.

    `length` defaults to the module-level global `leng` so the original
    zero-argument call style keeps working.
    """
    if length is None:
        length = leng
    return 'x' * (length + 6)
def inner(length=None):
    """Blank padding row between the border and the text row."""
    if length is None:
        length = leng
    return 'x' + ' ' * (length + 4) + 'x'
def string(text=None):
    """The text row: the quote padded with two spaces inside each border 'x'."""
    if text is None:
        text = quote
    return 'x' + ' ' * 2 + text + ' ' * 2 + 'x'
if __name__ == '__main__':
    #quote = input("Let's have a quote...: ")
    quote = 'I am a python'
    leng = len(quote)
    out = outer()
    inn = inner()
    txt = string()
    print(out + "\n" + inn + "\n" + txt + "\n" + inn + "\n" + out)
|
[
"richies@gmail.com"
] |
richies@gmail.com
|
fd1757cbaefda4ceaf1aa771e045b08774c21f1c
|
b7086d5e907aaf983af5b8d7d6f74c4fc6e40f23
|
/RA5/Skimmer/HLTSkimmer.py
|
eee2d8bf3b15d8d4b70614a251575da60d72ad3c
|
[] |
no_license
|
ahmad3213/PyNTupleProcessor
|
da40c596f275406f21e83e117c5b8020d6ee309c
|
c84fa597b132e91342226b12a74213f675c0b125
|
refs/heads/master
| 2023-03-29T18:59:19.454585
| 2020-09-21T21:52:25
| 2020-09-21T21:52:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,299
|
py
|
from Core.Module import Module
class HLTSkimmer(Module):
    """Event skimmer applying (or emulating) the 2016 HLT trigger selection.

    NOTE: Python 2 source (`raise Exc, msg` syntax).
    """
    def __init__(self,name,emulation=False,cutflow="SR"):
        # emulation: apply the trigger decision on MC as well as on data.
        # cutflow: which selection this skim serves ("SR" or "TightLoose").
        super(HLTSkimmer,self).__init__(name)
        self.emulation = emulation
        self.cutflow = cutflow
    def return_sr_trigger(self,event):
        """Return the trigger decision for the signal-region cutflow."""
        # Only 2016 data (or emulation mode) is supported; see the raise below.
        if self.emulation or (self.dataset.isData and "2016" in self.dataset.parent.name):
            # Run 2016H uses the DZ-filtered double-lepton paths; earlier runs
            # (and MC) use the non-DZ versions — hence the notRunH split.
            notRunH = ("2016H" not in self.dataset.parent.name and self.dataset.isData) or self.dataset.isMC
            if event.htJet40[0] < 300.:
                # Low-HT region: pure dilepton triggers (pdgId 11 = e, 13 = mu).
                if abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 11:
                    return event.HLT_BIT_HLT_Ele23_Ele12_CaloIdL_TrackIdL_IsoVL_DZ_v[0]
                elif abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 13:
                    if notRunH:
                        return event.HLT_BIT_HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_v[0] or event.HLT_BIT_HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_v[0]
                    else:
                        return event.HLT_BIT_HLT_Mu17_TrkIsoVVL_Mu8_TrkIsoVVL_DZ_v[0] or event.HLT_BIT_HLT_Mu17_TrkIsoVVL_TkMu8_TrkIsoVVL_DZ_v[0]
                elif (abs(event.firstLep.pdgId) == 13 and abs(event.secondLep.pdgId) == 11) or (abs(event.firstLep.pdgId) == 11 and abs(event.secondLep.pdgId) == 13):
                    if notRunH:
                        return event.HLT_BIT_HLT_Mu23_TrkIsoVVL_Ele8_CaloIdL_TrackIdL_IsoVL_v[0] or event.HLT_BIT_HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_v[0]
                    else:
                        return event.HLT_BIT_HLT_Mu23_TrkIsoVVL_Ele8_CaloIdL_TrackIdL_IsoVL_DZ_v[0] or event.HLT_BIT_HLT_Mu8_TrkIsoVVL_Ele23_CaloIdL_TrackIdL_IsoVL_DZ_v[0]
            else:
                # High-HT region: lepton+HT cross triggers.
                # NOTE(review): here pdgId==11 (electrons) maps to the DoubleMu8
                # path and pdgId==13 (muons) maps to the DoubleEle8 path, in both
                # branches below — looks swapped relative to the low-HT region;
                # confirm against the analysis trigger menu.
                if notRunH:
                    if abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 11:
                        return event.HLT_BIT_HLT_DoubleMu8_Mass8_PFHT300_v[0]
                    elif abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 13:
                        return event.HLT_BIT_HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
                    elif (abs(event.firstLep.pdgId) == 13 and abs(event.secondLep.pdgId) == 11) or (abs(event.firstLep.pdgId) == 11 and abs(event.secondLep.pdgId) == 13):
                        return event.HLT_BIT_HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
                else:
                    # Run H: fall back to PFJet450 on data when the cross
                    # trigger did not fire.
                    passTrig = False
                    if abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 11:
                        passTrig = event.HLT_BIT_HLT_DoubleMu8_Mass8_PFHT300_v[0]
                    elif abs(event.firstLep.pdgId) == abs(event.secondLep.pdgId) and abs(event.firstLep.pdgId) == 13:
                        passTrig = event.HLT_BIT_HLT_DoubleEle8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
                    elif (abs(event.firstLep.pdgId) == 13 and abs(event.secondLep.pdgId) == 11) or (abs(event.firstLep.pdgId) == 11 and abs(event.secondLep.pdgId) == 13):
                        passTrig = event.HLT_BIT_HLT_Mu8_Ele8_CaloIdM_TrackIdM_Mass8_PFHT300_v[0]
                    if not passTrig and self.dataset.isData:
                        return event.HLT_BIT_HLT_PFJet450_v[0]
                    else:
                        return passTrig
        else:
            raise RuntimeError,"Data other than 2016 are not supported atm"
    def analyze(self,event):
        """Module entry point: True keeps the event, False drops it."""
        # MC passes unconditionally unless trigger emulation is requested.
        if self.dataset.isMC and not self.emulation: return True
        if not hasattr(event,"firstLep") or not hasattr(event,"secondLep"):
            # Pick the leading tight lepton and a same-sign partner.
            event.tightLeps.sort(key=lambda x: x.pt,reverse=True)
            firstLep = event.tightLeps[0]
            for l in event.tightLeps[1:]:
                if l.charge*event.tightLeps[0].charge > 0.:
                    secondLep = l
            # NOTE(review): if no same-sign partner exists, secondLep is unbound
            # here (NameError); the loop also keeps the LAST (lowest-pt)
            # same-sign lepton rather than the leading one — confirm intended.
            event.firstLep = firstLep
            event.secondLep = secondLep
        if self.cutflow == "SR":
            return self.return_sr_trigger(event)
        elif self.cutflow == "TightLoose":
            #return self.return_tl_trigger(event)
            return self.return_sr_trigger(event)
        else:
            raise RuntimeError,"cutflow other than SR and TightLoose are not supported atm"
|
[
"lucien1011@gmail.com"
] |
lucien1011@gmail.com
|
60168d852d76649dbac368f415e7030b85d364e9
|
f134679dc39849cc741f5d8aaa63793d7c9f9b7d
|
/testapi/urls.py
|
36438b62909899e93887156062e759da560ad8a0
|
[] |
no_license
|
namraht/trial
|
792d7c7a427c463ab62b9675e745a7d537e3483c
|
3283ee39fcda03f5a1b1a04f3a4939d32ed40ac0
|
refs/heads/master
| 2020-12-04T01:04:27.056179
| 2016-09-07T21:38:14
| 2016-09-07T21:38:14
| 67,645,127
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 947
|
py
|
"""testapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from rest_framework.urlpatterns import format_suffix_patterns
from UserInfo import views
# URL routing table: admin site plus the users list API endpoint.
urlpatterns = [
    url(r'^admin/', admin.site.urls),
    # Class-based DRF view listing users.
    url(r'^users/',views.UsersList.as_view()),
]
# Allow format suffixes (e.g. /users.json) on the routes above.
urlpatterns=format_suffix_patterns(urlpatterns)
|
[
"admin"
] |
admin
|
c160e52892a736da04cfcce881b61a37f4b39b87
|
e49a07ad215172e9c82cb418b10371bf0ce1c0f7
|
/第1章 python基础/Python基础06-面向对象1/2-创建一个对象.py
|
0b4e1cd81068687f7f9621c83d7dba09708014b8
|
[] |
no_license
|
taogangshow/python_Code
|
829c25a7e32ead388c8b3ffa763cb9cf587bfd7b
|
4b3d6992ec407d6069f3187ca7e402a14d863fff
|
refs/heads/master
| 2022-12-16T01:26:17.569230
| 2018-11-16T10:07:59
| 2018-11-16T10:07:59
| 157,832,985
| 0
| 1
| null | 2022-11-25T09:55:32
| 2018-11-16T08:00:13
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
class Cat:
    """A simple cat with two actions; messages are printed in Chinese."""
    def eat(self):
        """Print the eating message."""
        print("猫在吃鱼...")
    def drink(self):
        """Print the drinking message."""
        print("猫在喝水...")
# Create a Cat instance
tom = Cat()
|
[
"cdtaogang@163.com"
] |
cdtaogang@163.com
|
f847516b8340b2693429d43db570e49faaff7d04
|
f3399d1ab1849b267cc83de30044dfe556598262
|
/src/main.py
|
705fe0947eac4f2daadef39724ef1951b6d1d92f
|
[] |
no_license
|
JiayuHeUSYD/DARNN_Multi_GPU
|
0114c263c026ca39e52fb2f47ed7031204a0d966
|
7e608237a0a7f8165e0e62d9ac50671346e7979b
|
refs/heads/master
| 2020-09-07T09:29:02.305668
| 2019-01-30T05:12:30
| 2019-01-30T05:12:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,562
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function, division
import torch
import argparse
import numpy as np
from tensorboardX import SummaryWriter
from DARNN import Encoder, Decoder
from CsiDataSet import CSI300Dataset
from VerParams import Version
def set_seed(seed=1):
    '''
    Seed every RNG in play (Python, NumPy, PyTorch CPU/CUDA) and force
    deterministic cuDNN so runs are reproducible.
    https://github.com/pytorch/pytorch/issues/11278
    https://github.com/pytorch/pytorch/issues/11278
    https://github.com/pytorch/pytorch/issues/12207
    '''
    import os
    import random
    os.environ['PYTHONHASHSEED'] = str(seed)
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
# Determinism Seed
set_seed()
from torch import nn
from torch import optim
# Parameters settings
parser = argparse.ArgumentParser(description="DA-RNN")
# Dataset setting
parser.add_argument(
    '--norm_csi_dir',
    type=str,
    default='/project/chli/scp/CSI300_NORM/',
    help='normalized csi300 csv dir')
parser.add_argument(
    '--num_workers',
    type=int,
    default=12,
    help='number of data loading workers (default 3)')
parser.add_argument(
    '--dataset_split_ratio',
    default=[0.8, 0.1, 0.1],
    type=list,
    help='train, valid, test dataset split ratio')
parser.add_argument(
    '--x_columns',
    default=['o', 'h', 'l', 'v', 'a'],
    type=list,
    help='list of features\' (X) column names')
parser.add_argument(
    '--y_columns',
    default=['c'],
    type=list,
    help='list of target (Y) column names')
# NOTE(review): argparse type=bool/type=list do not parse CLI strings the way
# one might expect (any non-empty string is truthy; list() splits characters).
# Harmless here because parse_args('') below only uses the defaults — but
# these flags cannot be meaningfully overridden from the command line.
parser.add_argument(
    '--pin_memory', type=bool, default=True, help='pin memory page')
parser.add_argument(
    '--debug', type=bool, default=False, help='debug with small data')
# Encoder / Decoder parameters setting
parser.add_argument(
    '--hid_dim_encoder',
    type=int,
    default=32,
    help='size of hidden states for the encoder m [64, 128]')
parser.add_argument(
    '--hid_dim_decoder',
    type=int,
    default=32,
    help='size of hidden states for the decoder p [64, 128]')
parser.add_argument(
    '--ind_steps',
    type=int,
    default=0,
    help='window length for computing indicator')
parser.add_argument(
    '--lag_steps',
    type=int,
    default=20,
    help='the number of lag time steps (history window length T)')
parser.add_argument(
    '--pred_steps',
    type=int,
    default=1,
    help='y_{t+pred_steps} = p(y_t,...,y_{timesteps-1}, x_t,...,x_{timesteps-1})'
)
# Training parameters setting
parser.add_argument(
    '--param_version', type=int, default=None, help='int versioning params')
parser.add_argument(
    '--epochs',
    type=int,
    default=10,
    help='number of epochs to train [10, 200, 500]')
parser.add_argument(
    '--lr',
    type=float,
    default=0.001,
    help='learning rate [0.001] reduced by 0.1 after each 10000 iterations')
parser.add_argument('--seed', default=1, type=int, help='manual seed')
parser.add_argument(
    '--batchsize', type=int, default=512, help='input batch size [128]')
parser.add_argument('--shuffle', type=bool, default=True, help='shuffle batch')
parser.add_argument(
    '--task_type', default='single', type=str, help='single or multi')
parser.add_argument(
    '--pred_type', default='shift', type=str, help='steps or shift')
# debug
# parse_args('') ignores sys.argv and takes every default; flip parse_cli to
# True to read the real command line instead.
parse_cli = False
opt = parser.parse_args('')
if parse_cli:
    opt = parser.parse_args()
if __name__ == "__main__":
    # debug
    # from importlib import reload
    opt.debug = False
    opt.num_workers = 20
    # import os
    # os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    # Resolve versioned hyper-parameter overrides into opt.
    ver = Version()
    ver.set_ver_opt(opt.param_version, opt)
    # TensorBoard run name encodes lag window, prediction horizon, hidden dim.
    suffix = 'L%dP%dHdim%d' % (opt.lag_steps, opt.pred_steps, opt.hid_dim_encoder)
    writer = SummaryWriter(comment=suffix)
    csi300 = CSI300Dataset()
    train_dataset, valid_dataset, test_dataset, \
        train_loader, valid_loader, test_loader = csi300.get_dataset_loader(
            opt)
    # NOTE(review): feat_dim is hard-coded to 13 rather than derived from
    # opt.x_columns (5 names) — presumably the dataset expands features;
    # confirm against CSI300Dataset.
    feat_dim = 13
    encoder = Encoder(opt.lag_steps, feat_dim, opt.hid_dim_encoder)
    decoder = Decoder(opt.lag_steps, opt.hid_dim_encoder, opt.hid_dim_decoder)
    # device = ('cpu')
    # Multi-GPU Support
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:
        encoder = nn.DataParallel(encoder)
        decoder = nn.DataParallel(decoder)
    encoder.to(device)
    decoder.to(device)
    criterion = nn.MSELoss()
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=opt.lr)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=opt.lr)
    # Train Loops
    n_batches_count = 1
    epoch_batch_loss_list = list()
    for epoch in range(opt.epochs):
        batch_loss_list = list()
        for data_dict in train_loader:
            # Prepare Data On Devices
            X = data_dict['X'].type(torch.FloatTensor).to(device)
            Y = data_dict['Y'].type(torch.FloatTensor).squeeze().to(device)
            Ygt = data_dict['Y_gt'].type(torch.FloatTensor).to(device)
            # Forward Pass
            H = encoder(X)
            Ypred = decoder(H, Y)
            loss = criterion(Ypred.squeeze(), Ygt)
            # Gradient Descent
            encoder_optimizer.zero_grad()
            decoder_optimizer.zero_grad()
            loss.backward()
            encoder_optimizer.step()
            decoder_optimizer.step()
            # Log Stats
            if n_batches_count % 100 == 0:
                writer.add_scalar('train/loss', loss.item(), n_batches_count)
            # Manual LR decay: multiply both optimizers' LR by 0.9 every 50k batches.
            if n_batches_count % 50000 == 0:
                for p in encoder_optimizer.param_groups:
                    p['lr'] *= 0.9
                for p in decoder_optimizer.param_groups:
                    p['lr'] *= 0.9
            n_batches_count += 1
        # NOTE(review): batch_loss_list is never appended to inside the inner
        # loop, so this prints [] every epoch — loss.item() was probably meant
        # to be collected.
        print(batch_loss_list)
        epoch_batch_loss_list.append(batch_loss_list)
|
[
"spacegoing@gmail.com"
] |
spacegoing@gmail.com
|
7b8b2c10c5c88c5070533d1be5d7e8280fd94ed0
|
3d060dd745ac19e58255843d496d6afe7168abe2
|
/work_for_aca_lsq/make_7122_intensities.py
|
5b203f1eb526a63b22446584054feab32121456f
|
[] |
no_license
|
nksauter/LS49
|
352e96e3601d2475f7f81e0c6a7e4771e9cf9911
|
e660c7395e3e3349d43ccd6e59cc099042c5c512
|
refs/heads/master
| 2023-05-27T01:50:34.996331
| 2023-05-15T22:09:56
| 2023-05-15T22:09:56
| 113,079,929
| 8
| 9
| null | 2023-05-10T18:37:14
| 2017-12-04T18:34:22
|
Python
|
UTF-8
|
Python
| false
| false
| 1,552
|
py
|
from __future__ import print_function
from __future__ import division
from six.moves import cPickle as pickle
from six.moves import range
if __name__=="__main__":
    from LS49.sim.util_fmodel import gen_fmodel
    from LS49.sim.step5_pad import pdb_lines,Fe_oxidized_model,Fe_reduced_model
    # Wavelength for a 7122 eV photon; 12398.425 is presumably hc in eV*Angstrom
    # (standard photon energy <-> wavelength conversion) — confirm units.
    W2 = 12398.425/7122.
    GF = gen_fmodel(resolution=1.9,pdb_text=pdb_lines,algorithm="fft",wavelength=W2)
    GF.set_k_sol(0.435)
    GF.reset_wavelength(W2)
    # Assign distinct scattering tables to the two iron sites at this wavelength.
    GF.reset_specific_at_wavelength(label_has="FE1",tables=Fe_oxidized_model,newvalue=W2)
    GF.reset_specific_at_wavelength(label_has="FE2",tables=Fe_reduced_model,newvalue=W2)
    W2_reduced = GF.get_intensities()
    # Einsle paper: Reduced form has
    # buried irons, FE1, in Fe(III) state (absorption at higher energy, oxidized)
    # surface iron, FE2, in Fe(II) state (absorption at lower energy, reduced)
    W2i = W2_reduced.indices()
    # Dump every (miller index, intensity) pair as fixed-width text.
    with (open("debug26.data","w")) as F:
        for iw in range(len(W2i)):
            print ("%20s, %10.2f"%(W2_reduced.indices()[iw],W2_reduced.data()[iw]), file=F)
    # Same data keyed by miller index, pickled for downstream use.
    intensity_dict = {}
    for iw in range(len(W2i)):
        intensity_dict[W2_reduced.indices()[iw]] = W2_reduced.data()[iw]
    with (open("debug26_intensities.pickle","wb")) as F:
        pickle.dump(intensity_dict, F, pickle.HIGHEST_PROTOCOL)
    with (open("sfall_7122_amplitudes.pickle","wb")) as F:
        pickle.dump(GF.get_amplitudes(), F, pickle.HIGHEST_PROTOCOL)
    # Re-express in primitive P1 and pickle the amplitudes again.
    GF.make_P1_primitive()
    with (open("sfall_P1_7122_amplitudes.pickle","wb")) as F:
        pickle.dump(GF.get_amplitudes(), F, pickle.HIGHEST_PROTOCOL)
|
[
"nksauter@lbl.gov"
] |
nksauter@lbl.gov
|
b0234fee86193c0f241ac55eb21416301a933d0c
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02785/s477774791.py
|
60aa3b22a33800e8d7a30bb8d0694975657ee378
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 134
|
py
|
# Read N (monster count) and K (free hits), then the N monster healths.
n, k = map(int, input().split())
healths = sorted(map(int, input().split()))
# K tallest are eliminated for free; answer is the sum of the rest.
# max(..., 0) keeps the slice empty when k >= n (matches range(N-K)).
print(sum(healths[:max(n - k, 0)]))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
5a9ba52649275f8cb827ccb6185ac2b1cf2f8f62
|
4e0f2938b003f5d68a57f213e652fbffb2f72ba2
|
/FishStat_M.py
|
4cb6b0e27eeb6eb548d7e915ecb695342689ce9f
|
[] |
no_license
|
adcGG/Lianxi
|
e4b1ce0d3cfc76e625e1e1caca0a58f25ba5d692
|
3659c3ca11a13b4ad54dbd2e669949701bae10b5
|
refs/heads/master
| 2022-12-13T05:45:41.312292
| 2019-08-14T07:38:19
| 2019-08-14T07:38:19
| 201,189,540
| 0
| 1
| null | 2022-04-22T22:08:16
| 2019-08-08T06:07:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,483
|
py
|
def day_stat(day, fishs):
    """
    Print one line per fish (name, count, unit price) for a single day,
    then a subtotal line with that day's total count and total amount.

    :param day: day label (string)
    :param fishs: mapping of fish name -> [count, unit_price]
    :return: None
    """
    total_count = 0
    total_amount = 0
    for name, record in fishs.items():
        count, price = record[0], record[1]
        print('%s 数量 %d 单价%.2f元' % (name, count, price))
        total_count += count
        total_amount += count * price
    print('%s 数量小计%d金额小计%.2f' % (day, total_count, total_amount))
def allday_stat(fish, maxs):
    """
    Aggregate every day's catch into one summary dict and update the
    running maxima/totals accumulator in place.

    :param fish: mapping day -> {fish name: [count, unit_price]}
    :param maxs: mutable list updated in place:
                 [0]/[1] name and count of the most numerous fish,
                 [2]/[3] name and amount of the highest-value fish,
                 [4] grand total count, [5] grand total amount.
    :return: dict of fish name -> [total_count, total_amount]
    """
    # (Removed the original dead pre-initializations of the loop variables.)
    stat_record = {}
    for day, day_record in fish.items():
        for name, rec in day_record.items():
            if name in stat_record:
                stat_record[name][0] += rec[0]
                stat_record[name][1] += rec[0] * rec[1]
            else:
                stat_record[name] = [rec[0], rec[0] * rec[1]]
    # Second pass: track the max-count and max-amount fish and grand totals.
    for name, totals in stat_record.items():
        if maxs[1] < totals[0]:
            maxs[0] = name
            maxs[1] = totals[0]
        if maxs[3] < totals[1]:
            maxs[2] = name
            maxs[3] = totals[1]
        maxs[4] = maxs[4] + totals[0]
        maxs[5] = maxs[5] + totals[1]
    return stat_record
def PrintMaxValues(maxstat1):
    """
    Print the maxima summary.

    :param maxstat1: list where [0]/[1] are the max-count fish name/count,
                     [2]/[3] the max-amount fish name/amount,
                     [4] the total count and [5] the total amount.
    :return: None
    """
    lines = (
        '最大数量的鱼是%s,%d条' % (maxstat1[0], maxstat1[1]),
        '最大金额的鱼是%s,%.2f元' % (maxstat1[2], maxstat1[3]),
        '钓鱼总数量为%d,总金额为%.2f元' % (maxstat1[4], maxstat1[5]),
    )
    for line in lines:
        print(line)
|
[
"979818137@11.com"
] |
979818137@11.com
|
883879f04c62bf4e480cf92b5d51696e945aea20
|
fc4fb632da74ba1b535192f26f64cbb3aa124c2d
|
/tests/scripts/thread-cert/Cert_5_3_08_ChildAddressSet.py
|
9613af53472edcfbdbfd5043edf8aaa94ad564c2
|
[
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
sbobrowicz/openthread
|
dc1fd4caed785a5d3ff9365530b0030e3498d3eb
|
a43fb455d99d3692bdc68aa6d9be96f973a1a4ea
|
refs/heads/master
| 2021-01-16T23:19:11.674806
| 2016-06-24T19:43:02
| 2016-06-24T19:43:02
| 61,918,103
| 1
| 1
| null | 2016-06-24T23:38:19
| 2016-06-24T23:38:18
| null |
UTF-8
|
Python
| false
| false
| 4,073
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2016, Nest Labs, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import pexpect
import time
import unittest
import node
# Node ids for the simulated topology: one leader and four end devices.
LEADER = 1
ED1 = 2
ED2 = 3
ED3 = 4
ED4 = 5
class Cert_5_3_8_ChildAddressSet(unittest.TestCase):
    """Thread certification case 5.3.8: the leader must be able to ping
    every non-link-local address registered by its attached children.

    Topology: one leader (mode 'rsdn') whitelisted with four end devices
    (mode 'rsn'); each end device whitelists only the leader.
    """

    def setUp(self):
        # Build the five simulated nodes (ids 1..5).
        self.nodes = {}
        for i in range(1, 6):
            self.nodes[i] = node.Node(i)

        self.nodes[LEADER].set_panid(0xface)
        self.nodes[LEADER].set_mode('rsdn')
        # The leader accepts all four end devices.
        for ed in (ED1, ED2, ED3, ED4):
            self.nodes[LEADER].add_whitelist(self.nodes[ed].get_addr64())
        self.nodes[LEADER].enable_whitelist()

        # Each end device is configured identically and only talks to the
        # leader (same call order as the original hand-unrolled setup).
        for ed in (ED1, ED2, ED3, ED4):
            self.nodes[ed].set_panid(0xface)
            self.nodes[ed].set_mode('rsn')
            self.nodes[ed].add_whitelist(self.nodes[LEADER].get_addr64())
            self.nodes[ed].enable_whitelist()

    def tearDown(self):
        # BUGFIX: the loop variable was named `node`, shadowing the imported
        # `node` module; renamed.  dict.values() iterates the same way under
        # both Python 2 and 3 (itervalues() is Python-2-only).
        for dev in self.nodes.values():
            dev.stop()
        del self.nodes

    def test(self):
        self.nodes[LEADER].start()
        self.nodes[LEADER].set_state('leader')
        self.assertEqual(self.nodes[LEADER].get_state(), 'leader')

        # Attach each end device in turn and confirm it becomes a child.
        for ed in (ED1, ED2, ED3, ED4):
            self.nodes[ed].start()
            time.sleep(3)
            self.assertEqual(self.nodes[ed].get_state(), 'child')

        # Ping every registered child address except link-local (fe80::/10).
        for i in range(2, 6):
            addrs = self.nodes[i].get_addrs()
            for addr in addrs:
                if addr[0:4] != 'fe80':
                    self.nodes[LEADER].ping(addr)
if __name__ == '__main__':
    # Run the certification test when executed directly.
    unittest.main()
|
[
"jonhui@nestlabs.com"
] |
jonhui@nestlabs.com
|
7274b14eb762afe828b515ecea48e3d6adf0ee84
|
818e11a0545de5ed0337e5baa4b92a732bd79521
|
/leetcode/python/128_Longest_Consecutive_Sequence.py
|
7262c304222f95a03e296ebf7273426408d6570c
|
[] |
no_license
|
JaySurplus/online_code
|
85300fb63dd4020d9135e32dfad5792850d335f6
|
8f44df0bcb521bbc3a7ff2564cbe931e146ae297
|
refs/heads/master
| 2021-01-20T09:07:30.038815
| 2018-08-21T15:08:01
| 2018-08-21T15:08:01
| 34,469,525
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,040
|
py
|
"""
128. Longest Consecutive Sequence
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
For example,
Given [100, 4, 200, 1, 3, 2],
The longest consecutive elements sequence is [1, 2, 3, 4]. Return its length: 4.
Your algorithm should run in O(n) complexity.
"""
import time
class Solution(object):
    def longestConsecutive(self, nums):
        """Return the length of the longest run of consecutive integers.

        O(n): each value enters and leaves the working set exactly once.

        :type nums: List[int]
        :rtype: int
        """
        remaining = set(nums)  # de-duplicated pool of unvisited values
        longest = 0
        while remaining:
            low = high = remaining.pop()
            # Grow the run downwards, consuming members as we go.
            while low - 1 in remaining:
                remaining.discard(low - 1)
                low -= 1
            # Grow the run upwards.
            while high + 1 in remaining:
                remaining.discard(high + 1)
                high += 1
            longest = max(longest, high - low + 1)
        return longest
# Smoke-test driver: longest run below is 1..8 -> expected length 8.
sol = Solution()
nums = [100,101,102,103 ,5,4,200,6,8,201,7,1,3,2 , 105 ,104]
#nums = [1,-8,7,-2,-4,-4,6,3,-4,0,-7,-1,5,1,-9,-3]
#nums = [1,2,3,4,5,0, -1]
res = sol.longestConsecutive(nums)
# BUGFIX: parenthesized print works under both Python 2 and Python 3;
# the bare `print res` statement is a SyntaxError on Python 3.
print(res)
|
[
"znznbest2004@gmail.com"
] |
znznbest2004@gmail.com
|
ea3d92ac6eaa0e3765679bde1dcdd3826f9104b9
|
4a8c1f7d9935609b780aff95c886ef7781967be0
|
/Flask/module/root/__init__.py
|
9e57fe84d463cdf6098fd2ee5cf2c0332eda0a49
|
[] |
no_license
|
recuraki/PythonJunkTest
|
d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a
|
2556c973d468a6988d307ce85c5f2f8ab15e759a
|
refs/heads/master
| 2023-08-09T17:42:21.875768
| 2023-07-18T23:06:31
| 2023-07-18T23:06:31
| 13,790,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 317
|
py
|
#!env python
# coding:utf-8
# Minimal Flask application that mounts the `hoge` sub-application at /moge.
from flask import Flask
from hoge.main import hoge
app = Flask(__name__)
app.debug = True
# Optional overrides from the file named by $FLASK_APP_SETTINGS (silently
# skipped when the variable is unset).
app.config.from_envvar('FLASK_APP_SETTINGS', silent=True)
# NOTE(review): register_module is the pre-0.7 Flask "Module" API; modern
# Flask replaced it with blueprints (app.register_blueprint) — confirm the
# pinned Flask version before upgrading.
app.register_module(hoge, url_prefix="/moge")
@app.route("/")
def index():
    # Root endpoint: returns a plain-text marker.
    return("index")
if __name__ == '__main__':
    # Development server entry point.
    app.run()
|
[
"glenda.kanai@gmail.com"
] |
glenda.kanai@gmail.com
|
221143b22b0808f84df7a4d31d7bc24371fbb9bf
|
6189f34eff2831e3e727cd7c5e43bc5b591adffc
|
/WebMirror/management/rss_parser_funcs/feed_parse_extractTxytranslationsWordpressCom.py
|
65323eb27a4c3c955883db9a3025cf3603421cb6
|
[
"BSD-3-Clause"
] |
permissive
|
fake-name/ReadableWebProxy
|
24603660b204a9e7965cfdd4a942ff62d7711e27
|
ca2e086818433abc08c014dd06bfd22d4985ea2a
|
refs/heads/master
| 2023-09-04T03:54:50.043051
| 2023-08-26T16:08:46
| 2023-08-26T16:08:46
| 39,611,770
| 207
| 20
|
BSD-3-Clause
| 2023-09-11T15:48:15
| 2015-07-24T04:30:43
|
Python
|
UTF-8
|
Python
| false
| false
| 570
|
py
|
def extractTxytranslationsWordpressCom(item):
    '''
    Parser for 'txytranslations.wordpress.com'
    '''
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])

    # Skip items without chapter/volume info, and preview posts.
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    # tag -> (release name, translation type); insertion order preserved.
    tag_table = {
        'PRC': ('PRC', 'translated'),
        'Loiterous': ('Loiterous', 'oel'),
    }

    for tagname, (name, tl_type) in tag_table.items():
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)

    return False
|
[
"something@fake-url.com"
] |
something@fake-url.com
|
f8957ea5ea0f59fe279970da77af2889af7bfebd
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_noting.py
|
23cacc23506f5124a72149831f5c2e50cc7fbca8
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 216
|
py
|
#calss header
class _NOTING():
def __init__(self,):
self.name = "NOTING"
self.definitions = note
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['note']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
afa762b941f1e50c05f69a041402ec8dac2894e1
|
a2c7bc7f0cf5c18ba84e9a605cfc722fbf169901
|
/python_1_to_1000/871_Minimum_Number_of_Refueling_Stops.py
|
cd4663edd331b07e501988b519ace8b0387a1bb5
|
[] |
no_license
|
jakehoare/leetcode
|
3bf9edd499034ce32be462d4c197af9a8ed53b5d
|
05e0beff0047f0ad399d0b46d625bb8d3459814e
|
refs/heads/master
| 2022-02-07T04:03:20.659422
| 2022-01-26T22:03:00
| 2022-01-26T22:03:00
| 71,602,471
| 58
| 38
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,363
|
py
|
# Repository metadata tags used across this solutions collection.
_author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/minimum-number-of-refueling-stops/
# A car travels from a starting position to a destination which is target miles east of the starting position.
# Along the way, there are gas stations.
# Each station[i] represents a gas station that is station[i][0] miles east of the starting position,
# and has station[i][1] liters of gas.
# The car starts with an infinite tank of gas, which initially has startFuel liters of fuel in it.
# It uses 1 liter of gas per 1 mile that it drives.
# When the car reaches a gas station, it may stop and refuel, transferring all the gas from the station into the car.
# What is the least number of refueling stops the car must make in order to reach its destination?
# If it cannot reach the destination, return -1.
# Note that if the car reaches a gas station with 0 fuel left, the car can still refuel there.
# If the car reaches the destination with 0 fuel left, it is still considered to have arrived.
# Maintain a heap of fuel at previous stations that has not been used. At each station the total fuel used must not be
# less than the distance. If it is less, use fuel from previous stations starting with the largest amounts. If no more
# fuel is unused, we cannot reach the target.
# Time - O(n log n)
# Space - O(n)
import heapq
class Solution:
    def minRefuelStops(self, target, startFuel, stations):
        """Return the minimum number of refueling stops needed to reach
        `target` miles, or -1 if the target cannot be reached.

        Greedy over a max-heap: drive as far as possible; whenever a point
        cannot be reached, retroactively "refuel" at the already-passed
        station holding the most gas.

        :type target: int
        :type startFuel: int
        :type stations: List[List[int]]
        :rtype: int
        """
        stops = 0
        fuel = startFuel                  # total fuel acquired so far
        past_fuels = []                   # min-heap of negated unused station fuel
        # Treat the target as a final zero-fuel station.  BUGFIX: iterate a
        # new list instead of appending to `stations`, which mutated the
        # caller's argument in place.
        for distance, station_fuel in stations + [[target, 0]]:
            while fuel < distance:        # cannot reach this point yet
                if not past_fuels:        # no unused fuel left: unreachable
                    return -1
                fuel -= heapq.heappop(past_fuels)   # largest unused fuel (negated)
                stops += 1
            heapq.heappush(past_fuels, -station_fuel)   # bank this station's fuel
        return stops
|
[
"jake_hoare@hotmail.com"
] |
jake_hoare@hotmail.com
|
64a41985166cecbfeb82821809d57b7442637d8d
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/CaptureCreateDTO.py
|
aab34c8dce4d17455c21624f902a4b43cf45d6cc
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771
| 2023-08-23T07:12:26
| 2023-08-23T07:12:26
| 133,338,689
| 247
| 70
|
Apache-2.0
| 2023-04-25T04:54:02
| 2018-05-14T09:40:54
|
Python
|
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class CaptureCreateDTO(object):
    """DTO pairing a capture number with an out-trade (merchant) number."""

    # Field names serialized to/from the Alipay wire dict.
    _FIELDS = ('capture_no', 'out_biz_no')

    def __init__(self):
        self._capture_no = None
        self._out_biz_no = None

    @property
    def capture_no(self):
        return self._capture_no

    @capture_no.setter
    def capture_no(self, value):
        self._capture_no = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    def to_alipay_dict(self):
        """Serialize truthy fields into a plain dict."""
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            # Nested model objects know how to serialize themselves.
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a CaptureCreateDTO from a dict; returns None for empty input."""
        if not d:
            return None
        o = CaptureCreateDTO()
        for field in CaptureCreateDTO._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
[
"jiandong.jd@antfin.com"
] |
jiandong.jd@antfin.com
|
e5d4969ae671189786a083b8438e5cbbb026b013
|
59fbeea017110472a788218db3c6459e9130c7fe
|
/maximum-swap/maximum-swap.py
|
3e4a1fe1c9171bb242c09a041e623ad88bc2d7e4
|
[] |
no_license
|
niufenjujuexianhua/Leetcode
|
82b55d9382bc9f63f4d9da9431194e20a4d299f1
|
542c99e038d21429853515f62af51a77deaa4d9c
|
refs/heads/master
| 2022-04-27T16:55:00.035969
| 2022-03-10T01:10:04
| 2022-03-10T01:10:04
| 79,742,663
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 342
|
py
|
class Solution:
    def maximumSwap(self, num: int) -> int:
        """Return the largest number obtainable by swapping at most one
        pair of digits of `num` (brute force over all digit pairs)."""
        digits = list(str(num))
        best = digits[:]
        n = len(digits)
        for i in range(n):
            for j in range(i + 1, n):
                # Try this swap on a fresh copy; keep it if it improves.
                candidate = digits[:]
                candidate[i], candidate[j] = candidate[j], candidate[i]
                if candidate > best:
                    best = candidate
        return int(''.join(best))
|
[
"wutuo123@yeah.net"
] |
wutuo123@yeah.net
|
6b9dec43d43bea442c82ebcbf64509eb70bf7973
|
68d267a3e352e40dd2e21359fabb7c97ce9c26aa
|
/2SAT/cases.py
|
603243d0922ff178a90b672d9cd64b8675e1476a
|
[] |
no_license
|
danyuanwang/karatsuba_mult
|
ac2ad60e98c05910036483f8e418b8478b27081f
|
a357af85e094f5836c2fbdabebf141b027a13076
|
refs/heads/master
| 2023-06-11T06:43:43.504877
| 2021-07-03T21:05:09
| 2021-07-03T21:05:09
| 285,746,475
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,036
|
py
|
import math
class Cases:
    """2-SAT clause container loaded from a text file.

    Each line of the input file holds one clause of two non-zero integers;
    a negative integer denotes the negated literal of that variable.
    """
    def __init__(self, link):
        # link: path to the clause file.
        self.size = 0
        self.cases = []
        handle = open(link)   # NOTE(review): handle is never closed — consider `with`
        for line in handle:
            a = line.split()
            #print(a)
            res = [int(i) for i in a]
            self.cases.append(res)
            self.size += 1
        # Simplification pass, repeated a fixed 100 times: collect literals
        # whose complement never appears ("pure" literals) and drop clauses
        # containing one, since those clauses are trivially satisfiable.
        for j in range(100):
            removableValues = []
            absRemovableValues = []
            counter = 0
            for case in self.cases:
                counter += 1
                print(j, counter, len(self.cases)," init")
                for value in case:
                    if abs(value) not in absRemovableValues:
                        removableValues.append(value)
                        absRemovableValues.append(abs(value))
                    else:
                        if -value in removableValues:
                            removableValues.remove(-value)
                            #absRemovableValues.remove(abs(value))
            # NOTE(review): removing from self.cases while iterating it skips
            # the element following each removal; iterating a copy
            # (self.cases[:]) would visit every clause.  Also, `size` is not
            # kept in sync with the shrunken list — confirm intent.
            for case in self.cases:
                for value in case:
                    if value in removableValues:
                        self.cases.remove(case)
                        break
    #test one case in the set
    #true if the first value or the second value == true
    #* negative indicates not
    def test_case(self, index, values):
        # values: truth assignment indexed by variable number (1-based).
        #print(int(self.cases[index][0]))
        value1 = values[abs(self.cases[index][0])]
        value2 = values[abs(self.cases[index][1])]
        if self.cases[index][0] < 0:
            value1 = not value1
        if self.cases[index][1] < 0:
            value2 = not value2
        return value1 or value2
    #test all cases using test above if there is one that evaluates to false return
    #that case, otherwise return -1
    def test_all_cases(self, values):
        # Returns the index of the first unsatisfied clause, or -1 if all hold.
        for index in range(len(self.cases)):
            result = self.test_case(index, values)
            if not result:
                return index
        return -1
|
[
"danyuanwang@hotmail.com"
] |
danyuanwang@hotmail.com
|
41d25d6445a09f1adb75f00de0d32733a3f6f56a
|
e408a1c27efcafaec1f6bf1e6255075eae171102
|
/LocationAdapter/src/server.py
|
57e659b1da6992afaa047a4ab336305e30ea855a
|
[] |
no_license
|
adoggie/BlueEarth
|
acd609a5da9eb0276dc810776f97a35c5b5ccf8c
|
ef2c4d045d8d933bcbf5789a87a189be6b33947d
|
refs/heads/master
| 2020-03-29T15:31:54.834861
| 2018-12-23T16:03:59
| 2018-12-23T16:03:59
| 150,067,522
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
#--coding:utf-8--
import traceback
from datetime import datetime
from logging import getLogger
import gevent
import gevent.socket
from gevent.server import StreamServer
import gevent.event
import gevent.ssl
from mantis.fundamental.utils.importutils import import_class
from mantis.fundamental.application.app import instance
class SocketClientIdentifier(object):
    """Identity record attached to each accepted socket connection."""

    def __init__(self):
        # May hold the unique device id of the connected device.
        self.unique_id = ''
        # Free-form per-connection properties.
        self.props = {}
class SocketConnection(object):
    """Wraps an accepted gevent socket; pumps received bytes into `consumer`.

    Written for Python 2 (see hex_dump, which calls ord() on str bytes).
    """
    def __init__(self,sock,consumer,server=None):
        self.server = server
        self.sock = sock
        self.consumer = consumer          # object with an onData(bytes) method
        self.datetime = None              # set to the open() timestamp
        self.client_id = SocketClientIdentifier()
    def getAddress(self):
        # Human-readable local endpoint label for logging.
        return 'RpcConnectionSocket:'+str(self.sock.getsockname())
    def open(self):
        # Records the connection-open time; always reports success.
        self.datetime = datetime.now()
        return True
    def close(self):
        if self.sock:
            self.sock.close()
            self.sock = None
    def sendData(self,data):
        # Blocking send of the whole buffer, then log it as hex.
        self.sock.sendall(data)
        instance.getLogger().debug( 'sent >> ' + self.hex_dump(data) )
    def hex_dump(self, bytes):
        # NOTE(review): parameter shadows the `bytes` builtin; map(ord, ...)
        # assumes a Python 2 str — on Python 3 bytes this raises TypeError.
        dump = ' '.join(map(hex, map(ord, bytes)))
        return dump
    def recv(self):
        # Blocking receive loop: reads until the peer closes or an error
        # occurs, forwarding each chunk to the consumer.
        while True:
            try:
                d = self.sock.recv(1000)
                if not d:
                    break
            except:
                # NOTE(review): bare except deliberately treats any socket
                # error as a disconnect.
                # traceback.print_exc()
                break
            try:
                self.consumer.onData(d)
            except:
                # Consumer failures are logged but do not kill the loop.
                instance.getLogger().error(traceback.format_exc())
                # traceback.print_exc()
        instance.getLogger().debug( 'socket disconnected!' )
        self.sock = None
class DataConsumer(object):
    """Feeds raw bytes through an accumulator and dispatches each complete
    message it yields to the handler."""

    def __init__(self, accumulator, handler):
        self.accumulator = accumulator
        self.handler = handler

    def onData(self, bytes):
        # The accumulator buffers partial frames and returns only the
        # messages that are complete so far.
        for message in self.accumulator.enqueue(bytes):
            self.handler.handle(message)
class Server(object):
    """gevent StreamServer wrapper; spawns one _service greenlet per client.

    Python 2 module (note the `print` statement in start()).
    """
    def __init__(self):
        self.cfgs = None          # dict-like config; see init()
        self.conns = []           # live SocketConnection objects
        self.server = None
    @property
    def name(self):
        return self.cfgs.get('name')
    def init(self,cfgs):
        # Stores the config and returns self for chaining.
        self.cfgs = cfgs
        return self
    def stop(self):
        self.server.stop()
    def start(self):
        # TLS is enabled when the config carries an 'ssl' entry.
        ssl = self.cfgs.get('ssl')
        if ssl:
            self.server = StreamServer((self.cfgs.get('host'),self.cfgs.get('port')),
                self._service,keyfile=self.cfgs.get('keyfile'),
                certfile=self.cfgs.get('certfile'))
        else:
            self.server = StreamServer((self.cfgs.get('host'),self.cfgs.get('port')), self._service)
        print 'socket server started!'
        self.server.start() #serve_forever() , not block
    def _service(self,sock,address):
        # Per-connection greenlet: build a fresh accumulator and handler
        # from their configured class paths, then block in conn.recv()
        # until the peer disconnects.
        cfgs = self.cfgs.get('accumulator')
        accCls = import_class(cfgs.get('class'))
        acc = accCls().init(cfgs)
        cfgs = self.cfgs.get('handler')
        handlerCls = import_class(cfgs.get('class'))
        handler = handlerCls().init(cfgs)
        # consumer = DataConsumer(acc,handler)
        conn = SocketConnection(sock,handler,self)
        self.addConnection(conn)
        # handler.setConnection(conn)
        handler.setAccumulator(acc)
        handler.onConnected(conn,address)
        conn.recv()
        self.removeConnection(conn)
        handler.onDisconnected()
    def sendMessage(self,m):
        # Placeholder; broadcasting is not implemented.
        pass
    def addConnection(self,conn):
        self.conns.append(conn)
    def removeConnection(self,conn):
        self.conns.remove(conn)
|
[
"24509826@qq.com"
] |
24509826@qq.com
|
b676e67a2be2fd461702f27946b6db58bf5a602a
|
1d342125c0f14dcbd56d02f6b85d40beb19b5a10
|
/interfaces/animal/stagnant.py
|
192590bd354dc9952090ccd44616cff669a99897
|
[] |
no_license
|
nss-cohort-36/keahua-arboretum-digital-destroyers
|
fb570a124c9c77a901722d271ca18c38742a2a37
|
7052fece09c00c32d2c245989315ee5695219a15
|
refs/heads/master
| 2020-12-19T04:25:13.269263
| 2020-01-29T17:40:00
| 2020-01-29T17:40:00
| 235,619,937
| 0
| 0
| null | 2020-01-29T18:01:38
| 2020-01-22T16:54:07
|
Python
|
UTF-8
|
Python
| false
| false
| 148
|
py
|
from .aquatic import IAquatic
class IStagnant(IAquatic):
    """Aquatic interface for organisms that dwell in still (stagnant) water."""

    def __init__(self):
        super().__init__()
        # Distinguishes still-water dwellers from flowing-water ones.
        self.dwell_type = "stillwater"
|
[
"matthewscottblagg@gmail.com"
] |
matthewscottblagg@gmail.com
|
eb99914b6fe3e8f58cfe85a0b2d331a33565fb19
|
60341a48087ba9683a8ee773237426d6e9411cf2
|
/hubspot/crm/objects/feedback_submissions/models/previous_page.py
|
507d39e5b4f2a07a20d0c700b047b86283d10caa
|
[] |
no_license
|
dalmaTeam/hubspot-api-python
|
86051bc676c4e007f23d7e7b759a3481c13fdea6
|
7c60c0f572b98c73e1f1816bf5981396a42735f6
|
refs/heads/master
| 2023-06-29T10:53:04.404611
| 2021-07-23T14:03:03
| 2021-07-23T14:03:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,064
|
py
|
# coding: utf-8
"""
Feedback Submissions
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v3
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from hubspot.crm.objects.feedback_submissions.configuration import Configuration
class PreviousPage(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    # NOTE(review): generated model for paging metadata (a required `before`
    # cursor and an optional `link`); regenerate rather than hand-edit.

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'before': 'str',
        'link': 'str'
    }

    attribute_map = {
        'before': 'before',
        'link': 'link'
    }

    def __init__(self, before=None, link=None, local_vars_configuration=None):  # noqa: E501
        """PreviousPage - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration

        self._before = None
        self._link = None
        self.discriminator = None

        # Assigning via the properties runs their validation.
        self.before = before
        if link is not None:
            self.link = link

    @property
    def before(self):
        """Gets the before of this PreviousPage.  # noqa: E501


        :return: The before of this PreviousPage.  # noqa: E501
        :rtype: str
        """
        return self._before

    @before.setter
    def before(self, before):
        """Sets the before of this PreviousPage.


        :param before: The before of this PreviousPage.  # noqa: E501
        :type: str
        """
        # `before` is required; reject None when client-side validation is on.
        if self.local_vars_configuration.client_side_validation and before is None:  # noqa: E501
            raise ValueError("Invalid value for `before`, must not be `None`")  # noqa: E501

        self._before = before

    @property
    def link(self):
        """Gets the link of this PreviousPage.  # noqa: E501


        :return: The link of this PreviousPage.  # noqa: E501
        :rtype: str
        """
        return self._link

    @link.setter
    def link(self, link):
        """Sets the link of this PreviousPage.


        :param link: The link of this PreviousPage.  # noqa: E501
        :type: str
        """

        self._link = link

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively convert nested models / lists / dicts of models.
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PreviousPage):
            return False

        return self.to_dict() == other.to_dict()

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, PreviousPage):
            return True

        return self.to_dict() != other.to_dict()
|
[
"plaurynovich@hubspot.com"
] |
plaurynovich@hubspot.com
|
7717d5b027b8dd04eae03dca16efba7adc05a5a9
|
e57d7785276053332c633b57f6925c90ad660580
|
/sdk/containerservice/azure-mgmt-containerservice/azure/mgmt/containerservice/v2021_02_01/aio/operations/_resolve_private_link_service_id_operations.py
|
305883969cbd73df32e03192f06cfe879f752c03
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
adriananeci/azure-sdk-for-python
|
0d560308497616a563b6afecbb494a88535da4c5
|
b2bdfe659210998d6d479e73b133b6c51eb2c009
|
refs/heads/main
| 2023-08-18T11:12:21.271042
| 2021-09-10T18:48:44
| 2021-09-10T18:48:44
| 405,684,423
| 1
| 0
|
MIT
| 2021-09-12T15:51:51
| 2021-09-12T15:51:50
| null |
UTF-8
|
Python
| false
| false
| 5,539
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Generic payload type plus the signature of the optional `cls` callback a
# caller may pass to transform the raw pipeline response.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ResolvePrivateLinkServiceIdOperations:
    """ResolvePrivateLinkServiceIdOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.containerservice.v2021_02_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE(review): AutoRest-generated operations class; regenerate instead
    # of editing by hand (see the file header).

    models = _models

    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config

    async def post(
        self,
        resource_group_name: str,
        resource_name: str,
        parameters: "_models.PrivateLinkResource",
        **kwargs: Any
    ) -> "_models.PrivateLinkResource":
        """Gets the private link service ID for the specified managed cluster.

        Gets the private link service ID the specified managed cluster.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param resource_name: The name of the managed cluster resource.
        :type resource_name: str
        :param parameters: Parameters (name, groupId) supplied in order to resolve a private link
         service ID.
        :type parameters: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateLinkResource
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkResource, or the result of cls(response)
        :rtype: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateLinkResource
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkResource"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2021-02-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"

        # Construct URL
        url = self.post.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
            'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateLinkResource')
        body_content_kwargs['content'] = body_content
        request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # Only 200 is a success for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PrivateLinkResource', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    post.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/resolvePrivateLinkServiceId'}  # type: ignore
|
[
"noreply@github.com"
] |
adriananeci.noreply@github.com
|
c893d4f0178ef7371b5ccb8bdde93d4981590a80
|
7699cd22ca370c89fb949eca80c587c0c3a9f8d5
|
/clients/opening_quote/high_price.py
|
cd9e9be7c05aceb3fc8152f88c399ef07a8e4ad4
|
[] |
no_license
|
ccliuyang/trader
|
8bc2d5f144784c9deca92908c209dc9dd69da4f2
|
f7975352bd6c2f34e164a87b03c0cc5e02b48752
|
refs/heads/master
| 2023-02-10T07:44:24.498417
| 2021-01-12T02:06:08
| 2021-01-12T02:06:08
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,732
|
py
|
from gevent import monkey; monkey.patch_all()
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), *(['..' + os.sep] * 2))))
import gevent
from clients.common import morning_client
from datetime import datetime, date, timedelta, time
from morning.back_data import holidays
from morning_server import stock_api, message
from gevent.queue import Queue
from pymongo import MongoClient
from configs import db
from morning.pipeline.converter import dt
import pandas as pd
import numpy as np
import daydata
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import order
# Kospi D VI: 3%, KOSDAQ: 6%
# 1. Point, Year High(3), TodayHigh(1), from yesterday to today(2)
# from current price and add most highest candle and check what point you can get
# find strongest momentum (from yesterday to today -> 1 min amount, exceed within 10 sec)
def find_open_time(one_min_dict):
    """Return the earliest market-open tick time across all codes.

    Only the first tick with market_type == 50 in each code's list is
    considered; returns None when no code has such a tick.
    """
    open_times = []
    for ticks in one_min_dict.values():
        for tick in ticks:
            if tick['market_type'] == 50:
                open_times.append(tick['date'])
                break   # only the first open tick per code matters
    if not open_times:
        return None
    return min(open_times)
def find_candidate(tdate, codes, start_datetime=None):
    """Scan the first minute of trading on `tdate` and return
    (candidate_list, start_datetime).

    Each candidate dict describes a code's opening-quote strength relative
    to yesterday's volume, plus buy/sell bias in the first ~10 seconds.
    """
    yesterday = holidays.get_yesterday(tdate)
    daydata.load_day_data(yesterday, codes, False)
    candidate = []
    one_minute_dict = {}
    # Keep only codes with yesterday's day data available.
    codes = list(filter(lambda x: daydata.has_day_data(x), codes))
    for code in codes:
        # NOTE(review): this overwrites the `start_datetime` parameter on
        # every iteration, so the `is None` check below can only trigger
        # when `codes` is empty — confirm whether that is intended.
        start_datetime = datetime.combine(tdate, time(8, 59, 59))
        ticks = morning_client.get_tick_data_by_datetime(code, start_datetime, start_datetime + timedelta(minutes=1))
        one_minute_dict[code] = ticks
    if start_datetime is None:
        start_datetime = find_open_time(one_minute_dict)
    print(tdate, 'START TICK TIME', start_datetime)
    for code in codes:
        is_kospi = morning_client.is_kospi_code(code)
        quote_amount = 0
        open_price = 0
        cname = ''
        is_started = False
        all_amount = 0
        bias_amount = 0
        for t in one_minute_dict[code]:
            if not is_started:
                # Wait for the market-open tick (market_type 50); its
                # cumulative amount is the opening-quote amount.
                if t['market_type'] == 50:
                    if t['time'] > 900:
                        break
                    # Scale: KOSPI amounts arrive in 10k units, KOSDAQ in 1k.
                    quote_amount = t['cum_amount'] * (10000 if is_kospi else 1000)
                    open_price = t['current_price']
                    cname = t['company_name']
                    is_started = True
            else:
                # After 10 seconds of post-open ticks, snapshot the stats.
                if t['date'] > start_datetime + timedelta(seconds=10):
                    candidate.append({'code': code,
                        'name': cname,
                        'ratio': quote_amount / daydata.get_yesterday_amount(code),
                        'starter_ratio': all_amount / quote_amount,
                        'bias_amount': bias_amount > 0,
                        'current_percent': (t['current_price'] - open_price) / open_price * 100.0})
                    break
                all_amount += t['volume'] * t['current_price']
                # buy_or_sell == 49 (ascii '1') marks a buy-side trade.
                if t['buy_or_sell'] == 49:
                    bias_amount += t['volume'] * t['current_price']
                else:
                    bias_amount -= t['volume'] * t['current_price']
    return candidate, start_datetime
def start_trading(tdate, codes, start_datetime):
    """Replay a 10-minute tick window for each candidate code through the
    order simulator, entering at the first tick after the quote window.

    BUGFIX: the original advanced start_datetime by 11 seconds *inside* the
    per-code loop, so each later code started 11 more seconds late; the
    offset is now applied once, before the loop.
    """
    start_datetime = start_datetime + timedelta(seconds=11)
    for code in codes:
        print('start_trading', code)
        ticks = morning_client.get_tick_data_by_datetime(code, start_datetime, start_datetime + timedelta(minutes=10))
        order_tick = ticks[0]
        # Profit target: +3% over the entry ask price.
        order.add_order(code, order_tick, [order_tick['ask_price'] * 1.03])
        for t in ticks[1:-1]:
            if not order.check_tick(code, t):
                break
        order.finalize(code, ticks[-1])
if __name__ == '__main__':
    # Backtest driver: replay the opening-quote strategy over a date range.
    all_codes = morning_client.get_all_market_code() # for is_kospi
    #all_codes = ['A326030', 'A002100'] # 8/10 datetime(2020, 8, 10, 8, 59, 59)
    #all_codes = ['A128940', 'A060150', 'A005257'] # datetime(2020, 8, 6, 9, 0, 0, 503000)
    # 8/7, 8/11 empty
    start_dt = datetime(2020, 8, 20).date()
    while start_dt <= datetime(2020, 8, 20).date():
        # Skip holidays and two hard-coded no-data dates (8/12, 8/13).
        if holidays.is_holidays(start_dt) or datetime(2020, 8, 12).date() == start_dt or datetime(2020, 8, 13).date() == start_dt:
            start_dt += timedelta(days=1)
            continue
        tdate = start_dt
        candidate, start_datetime = find_candidate(tdate, all_codes, None)
        #print(candidate)
        # Rank by opening-quote ratio, keep the top 20, then require buy-side
        # bias and an open-to-now move in (0.5%, 5%].
        sorted_by_ratio = sorted(candidate, key=lambda x: x['ratio'], reverse=True)
        sorted_by_ratio = sorted_by_ratio[:20]
        sorted_by_ratio = list(filter(lambda x: x['bias_amount'] and 0.5 < x['current_percent'] <= 5, sorted_by_ratio))
        #sorted_by_ratio = sorted(sorted_by_ratio, key=lambda x: x['starter_ratio'], reverse=True)
        # NOTE(review): sorted_by_profit is computed but never used below.
        sorted_by_profit = sorted(sorted_by_ratio, key=lambda x: x['current_percent'], reverse=True)
        filtered_codes = [t['code'] for t in sorted_by_ratio[:5]]
        print(filtered_codes)
        start_trading(tdate, filtered_codes, start_datetime)
        start_dt += timedelta(days=1)
    #df = pd.DataFrame(order._bills)
    #df.to_excel('trade_bills.xlsx')
"""
start_trading A326030
ORDER {'code': 'A326030', 'date': datetime.datetime(2020, 8, 10, 9, 0, 11, 8000), 'bought': 186500, 'target': 189297.49999999997}
{'code': 'A326030', 'btime': datetime.datetime(2020, 8, 10, 9, 0, 11, 8000), 'stime': datetime.datetime(2020, 8, 10, 9, 7, 13, 728000), 'bought': 186500, 'sell': 185000, 'profit': '-1.08', 'reason': 'CUT', 'scount': 0, 'fcount': 1}
start_trading A002100
ORDER {'code': 'A002100', 'date': datetime.datetime(2020, 8, 10, 9, 0, 11, 204000), 'bought': 16650, 'target': 16899.75}
{'code': 'A002100', 'btime': datetime.datetime(2020, 8, 10, 9, 0, 11, 204000), 'stime': datetime.datetime(2020, 8, 10, 9, 1, 41, 688000), 'bought': 16650, 'sell': 16900, 'profit': '1.22', 'reason': 'PROFIT', 'scount': 1, 'fcount': 1}
start_trading A185750
ORDER {'code': 'A185750', 'date': datetime.datetime(2020, 8, 10, 9, 0, 11, 47000), 'bought': 177500, 'target': 180162.49999999997}
{'code': 'A185750', 'btime': datetime.datetime(2020, 8, 10, 9, 0, 11, 47000), 'stime': datetime.datetime(2020, 8, 10, 9, 7, 49, 727000), 'bought': 177500, 'sell': 175500, 'profit': '-1.40', 'reason': 'CUT', 'scount': 1, 'fcount': 2}
start_trading A128940
ORDER {'code': 'A128940', 'date': datetime.datetime(2020, 8, 6, 9, 0, 11, 22000), 'bought': 383000, 'target': 388744.99999999994}
{'code': 'A128940', 'btime': datetime.datetime(2020, 8, 6, 9, 0, 11, 22000), 'stime': datetime.datetime(2020, 8, 6, 9, 0, 19, 725000), 'bought': 383000, 'sell': 380000, 'profit': '-1.06', 'reason': 'CUT', 'scount': 0, 'fcount': 1}
start_trading A060150
ORDER {'code': 'A060150', 'date': datetime.datetime(2020, 8, 6, 9, 0, 11, 59000), 'bought': 10500, 'target': 10657.499999999998}
{'code': 'A060150', 'btime': datetime.datetime(2020, 8, 6, 9, 0, 11, 59000), 'stime': datetime.datetime(2020, 8, 6, 9, 0, 19, 999000), 'bought': 10500, 'sell': 10700, 'profit': '1.62', 'reason': 'PROFIT', 'scount': 1, 'fcount': 1}
start_trading A005257
ORDER {'code': 'A005257', 'date': datetime.datetime(2020, 8, 6, 9, 0, 12, 47000), 'bought': 247000, 'target': 250704.99999999997}
{'code': 'A005257', 'btime': datetime.datetime(2020, 8, 6, 9, 0, 12, 47000), 'stime': datetime.datetime(2020, 8, 6, 9, 0, 41, 577000), 'bought': 247000, 'sell': 251500, 'profit': '1.54', 'reason': 'PROFIT', 'scount': 2, 'fcount': 1}
"""
|
[
"nnnlife@gmail.com"
] |
nnnlife@gmail.com
|
31b8195ebc6b66e72576c39573db54f394df7e49
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p04013/s491767407.py
|
0a1b3009b328e28350daacffd06009c1c38057f1
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 641
|
py
|
# Counting DP: dp[i][j][k] = number of ways to choose j cards among the
# first i cards so that their values sum to k.  A selection of j cards has
# average exactly A iff its sum equals j*A.
N, A = map(int, input().split()) # N cards; we want the average to be A
X = tuple(map(int, input().split())) # the values written on the N cards
dp = [[[0]*(50*N+2) for _ in range(N+2)] for _ in range(N+2)]
dp[0][0][0] = 1 # one way to choose 0 cards out of 0 with sum 0
for i in range(N):
    for j in range(N+1):
        for k in range(50*N+1):
            # if dp[i][j][k]: # optional pruning: only propagate reachable states
            # NOTE(review): the "skip card i" transition (first line) is also
            # gated by the bounds check, which looks wrong in general; here it
            # is harmless because reachable sums (values <= 50) never make
            # k+X[i] overflow the table.
            if k+X[i] < 50*N+2:
                dp[i+1][j][k] += dp[i][j][k]
                dp[i+1][j+1][k+X[i]] += dp[i][j][k]
ans = 0
for i in range(1, N+1):
    ans += dp[N][i][i*A]
print(ans)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
05908f9e878730f361c397fe8138bb1d4e96e6e4
|
f4e45e2f6a6c42571eefdc64773ca83c6b9c2b98
|
/lib/telepot2/filtering.py
|
3b6aca406e3f6ffe1af3f1a3657701be9b06abc3
|
[] |
no_license
|
soju6jan2/sjva2_src_obfuscate
|
83659707ca16d94378b7eff4d20e5e7ccf224007
|
e2dd6c733bbf34b444362011f11b5aca2053aa34
|
refs/heads/master
| 2023-04-21T12:27:01.132955
| 2021-05-06T17:35:03
| 2021-05-06T17:35:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 929
|
py
|
def pick(obj, keys):
    """Return the value(s) stored under *keys* in a dict or object.

    *keys* may be a single key/attribute name or a list of them; a list
    input yields a list of values (dict lookup for dicts, getattr otherwise).
    """
    def _one(key):
        return obj[key] if type(obj) is dict else getattr(obj, key)

    return [_one(k) for k in keys] if isinstance(keys, list) else _one(keys)


def match(data, template):
    """Recursively test whether *data* satisfies *template*.

    A dict template matches a dict datum key-by-key; template keys may be
    compiled regex objects (anything with a ``search`` method), in which
    case every data key matching the pattern is tried.  A callable template
    acts as a predicate; anything else is compared for equality.
    """
    if isinstance(template, dict) and isinstance(data, dict):
        for template_key, template_value in template.items():
            if hasattr(template_key, 'search'):
                matching_keys = [k for k in data.keys() if template_key.search(k)]
                if not matching_keys:
                    return False
            elif template_key in data:
                matching_keys = [template_key]
            else:
                return False
            candidates = pick(data, matching_keys)
            if not any(match(value, template_value) for value in candidates):
                return False
        return True
    if callable(template):
        return template(data)
    return data == template


def match_all(msg, templates):
    """Return True only if *msg* matches every template in *templates*."""
    return all(match(msg, t) for t in templates)
# Created by pyminifier (https://github.com/liftoff/pyminifier)
|
[
"cybersol@naver.com"
] |
cybersol@naver.com
|
1624a5f97468afb319da8fcebad9fbc440a0f1cb
|
60a831fb3c92a9d2a2b52ff7f5a0f665d4692a24
|
/IronPythonStubs/release/stubs.min/Autodesk/Revit/UI/Mechanical.py
|
7918f6d2c82c384d607a5c89c4a612322313f8a0
|
[
"MIT"
] |
permissive
|
shnlmn/Rhino-Grasshopper-Scripts
|
a9411098c5d1bbc55feb782def565d535b27b709
|
0e43c3c1d09fb12cdbd86a3c4e2ba49982e0f823
|
refs/heads/master
| 2020-04-10T18:59:43.518140
| 2020-04-08T02:49:07
| 2020-04-08T02:49:07
| 161,219,695
| 11
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,755
|
py
|
# encoding: utf-8
# module Autodesk.Revit.UI.Mechanical calls itself Mechanical
# from RevitAPIUI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class DuctFittingAndAccessoryPressureDropUIData(object,IDisposable):
    """ The input and output data used by external UI servers for storing UI settings. """
    # NOTE: auto-generated IronPython stub for the .NET RevitAPIUI assembly;
    # method bodies are placeholders that only document the API surface.
    def Dispose(self):
        """ Dispose(self: DuctFittingAndAccessoryPressureDropUIData) """
        pass
    def GetUIDataItems(self):
        """
        GetUIDataItems(self: DuctFittingAndAccessoryPressureDropUIData) -> IList[DuctFittingAndAccessoryPressureDropUIDataItem]

        Gets all UI data items stored in the UI data.
        Returns: An array of UI data items.
        """
        pass
    def GetUnits(self):
        """
        GetUnits(self: DuctFittingAndAccessoryPressureDropUIData) -> Units

        Gets units.
        Returns: The Units object.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: DuctFittingAndAccessoryPressureDropUIData,disposing: bool) """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: DuctFittingAndAccessoryPressureDropUIData) -> bool
    """
class DuctFittingAndAccessoryPressureDropUIDataItem(object,IDisposable):
    """ Each duct fitting or duct accessory FamilyInstance has one DuctFittingAndAccessoryPressureDropUIDataItem. """
    # NOTE: auto-generated IronPython stub; bodies are placeholders.
    def Dispose(self):
        """ Dispose(self: DuctFittingAndAccessoryPressureDropUIDataItem) """
        pass
    def GetDuctFittingAndAccessoryData(self):
        """
        GetDuctFittingAndAccessoryData(self: DuctFittingAndAccessoryPressureDropUIDataItem) -> DuctFittingAndAccessoryData

        Gets the fitting data stored in the UI data item.
        Returns: The fitting data stored in the UI data item.
        """
        pass
    def GetEntity(self):
        """
        GetEntity(self: DuctFittingAndAccessoryPressureDropUIDataItem) -> Entity

        Returns the entity set by UI server.
        or an invalid entity otherwise.
        Returns: The returned Entity.
        """
        pass
    def ReleaseUnmanagedResources(self,*args):
        """ ReleaseUnmanagedResources(self: DuctFittingAndAccessoryPressureDropUIDataItem,disposing: bool) """
        pass
    def SetEntity(self,entity):
        """
        SetEntity(self: DuctFittingAndAccessoryPressureDropUIDataItem,entity: Entity)

        Stores the entity in the UI data item.
        entity: The Entity to be stored.
        """
        pass
    def __enter__(self,*args):
        """ __enter__(self: IDisposable) -> object """
        pass
    def __exit__(self,*args):
        """ __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __repr__(self,*args):
        """ __repr__(self: object) -> str """
        pass
    IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
    """Specifies whether the .NET object represents a valid Revit entity.

    Get: IsValidObject(self: DuctFittingAndAccessoryPressureDropUIDataItem) -> bool
    """
class IDuctFittingAndAccessoryPressureDropUIServer(IExternalServer):
    """ Interface for external servers providing optional UI for duct fitting and duct accessory coefficient calculation. """
    # NOTE: auto-generated IronPython stub; bodies are placeholders.
    def GetDBServerId(self):
        """
        GetDBServerId(self: IDuctFittingAndAccessoryPressureDropUIServer) -> Guid

        Returns the Id of the corresponding DB server for which this server provides an
        optional UI.
        Returns: The Id of the DB server.
        """
        pass
    def ShowSettings(self,data):
        """
        ShowSettings(self: IDuctFittingAndAccessoryPressureDropUIServer,data: DuctFittingAndAccessoryPressureDropUIData) -> bool

        Shows the settings UI.
        data: The input data of the calculation.
        Returns: True if the user makes any changes in the UI,false otherwise.
        """
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
|
[
"magnetscoil@gmail.com"
] |
magnetscoil@gmail.com
|
a76fbfcbd6a9727bd7e8f2f90802a7d1f3d3dfec
|
bdb3716c644b8d031af9a5285626d7ccf0ecb903
|
/code/UI/OpenAPI/python-flask-server/KG2/openapi_server/test/test_entity_controller.py
|
e780d02dcb6fb787d438c3fd38e7c36eba951a02
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
RTXteam/RTX
|
97d2a8946d233d48cc1b165f5e575af21bda4b26
|
ed0693dd03149e56f7dfaf431fb8a82ace0c4ef3
|
refs/heads/master
| 2023-09-01T21:48:49.008407
| 2023-09-01T20:55:06
| 2023-09-01T20:55:06
| 111,240,202
| 43
| 31
|
MIT
| 2023-09-14T16:20:01
| 2017-11-18T21:19:13
|
Python
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
# coding: utf-8
from __future__ import absolute_import
import unittest
from flask import json
from six import BytesIO
from openapi_server.test import BaseTestCase
class TestEntityController(BaseTestCase):
    """EntityController integration test stubs"""

    def test_get_entity(self):
        """Test case for get_entity

        Obtain CURIE and synonym information about a search term
        """
        # Two CURIEs queried at once through a repeated 'q' query parameter.
        query_string = [('q', ["MESH:D014867","NCIT:C34373"])]
        headers = {
            'Accept': 'application/json',
        }
        response = self.client.open(
            '/api/rtxkg2/v1.0/entity',
            method='GET',
            headers=headers,
            query_string=query_string)
        # Only asserts HTTP 200; the response body is echoed on failure.
        self.assert200(response,
                       'Response body is : ' + response.data.decode('utf-8'))
if __name__ == '__main__':
unittest.main()
|
[
"edeutsch@systemsbiology.org"
] |
edeutsch@systemsbiology.org
|
273c9a78a2f06229134518a5ee0ee75c043c5e8f
|
cbd2eee46663fad5b5375b13c8c21b1b06eb4c6b
|
/ecloud/code/src/main/python/easted/network/__init__.py
|
dd0a418f0f5c9bd47a8e408f7d9ea0c723488ad7
|
[] |
no_license
|
1026237416/Python
|
ef474ee40d7efcd6dabb6fb0ecba81b4dcfc7e14
|
ffa8f9ffb8bfec114b0ca46295db05c4213c4c30
|
refs/heads/master
| 2021-07-05T00:57:00.456886
| 2019-04-26T10:13:46
| 2019-04-26T10:13:46
| 114,510,323
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 303
|
py
|
# -*- coding: utf-8 -*-
from exception import *
from network import *
from networkdao import *
from networkhost import *
from subnet import *
from subnetdao import *
from subnet import *
from tenant_subnet import *
from tenant_host import *
from common import request_create_ports, request_delete_ports
|
[
"1026237416@qq.com"
] |
1026237416@qq.com
|
fdb12c98cca4cd1b2931ec80ce4bc5bd1cfc0bc4
|
e902470b1e6dad9be93631d3663382082f2b3221
|
/supervised_learning/0x03-optimization/12-learning_rate_decay.py
|
5350524cf17e57663bd160289bdbecd21c9bff91
|
[] |
no_license
|
BrianFs04/holbertonschool-machine_learning
|
11bb645d86a0de74434d37bb36a239534e7d0787
|
d9b5fa4d60cd896c42242d9e72c348bd33046fba
|
refs/heads/master
| 2022-12-30T07:13:12.868358
| 2020-10-03T22:40:36
| 2020-10-03T22:40:36
| 279,386,948
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 348
|
py
|
#!/usr/bin/env python3
"""learning_rate_decay"""
import tensorflow as tf
def learning_rate_decay(alpha, decay_rate, global_step, decay_step):
    """Creates a learning rate decay operation"""
    # Inverse-time decay with staircase=True (last positional arg):
    # alpha / (1 + decay_rate * floor(global_step / decay_step)),
    # i.e. the rate drops in discrete steps every decay_step steps.
    rate_op = tf.train.inverse_time_decay(alpha, global_step, decay_step,
                                          decay_rate, True)
    return(rate_op)
|
[
"brayanflorezsanabria@gmail.com"
] |
brayanflorezsanabria@gmail.com
|
7bbba9fa880f15ce4c10fba89b19daed63c4b17e
|
c9c1ac74238bd9ce8598af9ec4a52baae3cd4c26
|
/pkg/clm/DEBIAN/prerm
|
faadaf2324fcfda30cabf6dac71288502af74c20
|
[
"Apache-2.0"
] |
permissive
|
cloudcache/cc1
|
4dbd9b5931483eed2d62459546c502c2d0ceb513
|
360392995a8aea37573dd772d809a006b78f575b
|
refs/heads/master
| 2021-01-24T04:13:54.670317
| 2014-06-06T08:17:46
| 2014-06-06T08:17:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 940
|
#!/usr/bin/python
import sys
import subprocess
import netifaces
if __name__ == '__main__':
    # Debian maintainer script (prerm), Python 2 syntax.
    # dpkg passes the action ('remove' or 'purge') on the command line.
    if 'remove' in sys.argv:
        # Package removal: deconfigure the CLM service but keep purgeable data.
        log = open('/var/log/cc1/cm_install.log', 'a')
        r = subprocess.call('cc1_clm_setup_config remove', shell=True, stdout=log)
        r = subprocess.call('cc1_clm_setup_db_psql remove', shell=True, stdout=log)
        r = subprocess.call('cc1_clm_setup_apache disable', shell=True, stdout=log)
        log.close()
        sys.exit(0)
    elif 'purge' in sys.argv:
        # Full purge: drop configuration, database and Apache setup.
        log = open('/var/log/cc1/cm_install.log', 'a')
        r = subprocess.call('cc1_clm_setup_config purge', shell=True, stdout=log)
        r = subprocess.call('cc1_clm_setup_db_psql purge', shell=True, stdout=log)
        r = subprocess.call('cc1_clm_setup_apache purge', shell=True, stdout=log)
        log.close()
        sys.exit(0)
    else:
        print "Use cc1 tools (cc1_...) to reconfigure services!"
        sys.exit(0)
|
[
"cc1@cloud.ifj.edu.pl"
] |
cc1@cloud.ifj.edu.pl
|
|
dd02bba44f176099278785f7272ae549e975f3f3
|
fa66cc0f9fba22da4b2a3291e804d358e88e0a47
|
/class6/exercises/ex2a_yaml_inventory.py
|
91ed3c734ef90b7de41327c17867727c82f012dd
|
[
"Apache-2.0"
] |
permissive
|
bminus87/pyplus_course
|
d6aa2246b9882454bca029a0c5d807b461b89561
|
46fcf460df0e06c0df563822ba604c78b75043ae
|
refs/heads/master
| 2020-05-30T00:53:19.291358
| 2019-05-30T19:12:28
| 2019-05-30T19:12:28
| 189,467,288
| 0
| 0
|
Apache-2.0
| 2019-05-30T19:00:36
| 2019-05-30T19:00:36
| null |
UTF-8
|
Python
| false
| false
| 928
|
py
|
import pyeapi
import yaml
from getpass import getpass
def yaml_load_devices(filename="arista_devices.yml"):
    """Load the device inventory from a YAML file.

    Returns the parsed content (a mapping of device name -> connection
    parameters).  I/O and parse errors propagate to the caller.
    """
    # Bug fix: the original had a ``raise ValueError`` *after* this
    # ``return`` -- unreachable dead code.  ``open()`` already raises
    # OSError and ``yaml.safe_load`` raises YAMLError on failure.
    with open(filename, "r") as f:
        return yaml.safe_load(f)
def main():
    """Connect to each Arista device in the YAML inventory and print its ARP table."""
    devices = yaml_load_devices()
    # One shared password for all devices, prompted once.
    password = getpass()
    for name, device_dict in devices.items():
        device_dict["password"] = password
        connection = pyeapi.client.connect(**device_dict)
        device = pyeapi.client.Node(connection)
        output = device.enable("show ip arp")
        print()
        print("-" * 40)
        # eAPI returns one result dict per command; index 0 is "show ip arp".
        arp_list = output[0]["result"]["ipV4Neighbors"]
        for arp_entry in arp_list:
            mac_address = arp_entry["hwAddress"]
            ip_address = arp_entry["address"]
            # Columns: IP (15 wide), arrow (5), MAC (15), all centered.
            print("{:^15}{:^5}{:^15}".format(ip_address, "-->", mac_address))
        print("-" * 40)
        print()


if __name__ == "__main__":
    main()
|
[
"ktbyers@twb-tech.com"
] |
ktbyers@twb-tech.com
|
5487a5680a39929ea9cadae8ca2975340c24f0cc
|
83e42b592923e56b99ff16d3762e89ffb29a75dc
|
/collective/abovecontentportlets/abovecontentportlets.py
|
b69467fe1ef6b3010757bc81b282e20085aa35e7
|
[] |
no_license
|
intk/collective.abovecontentportlets
|
996a1b2bac8c7a471b0e26287f08d87298b4a277
|
460c68e7e66622c355356e6f84e733e8d979ec11
|
refs/heads/master
| 2021-01-20T18:39:44.176794
| 2016-06-28T13:53:27
| 2016-06-28T13:53:27
| 62,136,662
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,490
|
py
|
from five import grok
from z3c.form import group, field
from zope import schema
from zope.interface import invariant, Invalid
from zope.schema.interfaces import IContextSourceBinder
from zope.schema.vocabulary import SimpleVocabulary, SimpleTerm
from plone.dexterity.content import Container
from plone.directives import dexterity, form
from plone.app.textfield import RichText
from plone.namedfile.field import NamedImage, NamedFile
from plone.namedfile.field import NamedBlobImage, NamedBlobFile
from plone.namedfile.interfaces import IImageScaleTraversable
from plone.dexterity.browser.view import DefaultView
from zope.interface import implementer
from collective.abovecontentportlets import MessageFactory as _
from datetime import date
from zope.component import queryMultiAdapter
from plone.app.layout.viewlets.common import ViewletBase
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from zope.component import getMultiAdapter
class AboveContentPortletsViewlet(ViewletBase):
    # Viewlet that renders the 'collective.abovecontentportlets' portlet
    # manager just above the content area.
    index = ViewPageTemplateFile('abovecontentportlets_templates/portlet.pt')

    def update(self):
        super(AboveContentPortletsViewlet, self).update()
        # Exposed to the page template; presumably used for a year/copyright
        # string in portlet.pt -- TODO confirm against the template.
        self.year = date.today().year

    def render_abovecontent_portlets(self):
        # Look up the named portlet manager for (context, request, view),
        # let it collect its portlets, and return the rendered HTML.
        portlet_manager = getMultiAdapter(
            (self.context, self.request, self.__parent__),
            name='collective.abovecontentportlets'
        )
        portlet_manager.update()
        return portlet_manager.render()
|
[
"andreslb1@gmail.com"
] |
andreslb1@gmail.com
|
7753fba106e04f5a55c9d9234dfd02dacd0afb9b
|
6e9d54971c55336fe93551d38e3fc7929b6ac548
|
/1008.ConstructBinarySearchTreefromPreorderTraversal.py
|
5cef22d795f2cc7e88c313bb9bfd1a9a5673cc3d
|
[] |
no_license
|
aucan/LeetCode-problems
|
61549b69c33a9ac94b600791c4055d4fbfb5a0c3
|
57b84c684a9171100166133ee04a69665334ca84
|
refs/heads/master
| 2023-04-12T00:46:26.521951
| 2021-05-04T13:44:28
| 2021-05-04T13:44:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,535
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 20 11:04:36 2020
@author: nenad
"""
"""
Problem URL: https://leetcode.com/problems/construct-binary-search-tree-from-preorder-traversal/
Problem description:
Return the root node of a binary search tree that matches the given preorder traversal.
(Recall that a binary search tree is a binary tree where for every node, any descendant of node.left has a value < node.val, and any descendant of node.right has a value > node.val. Also recall that a preorder traversal displays the value of the node first, then traverses node.left, then traverses node.right.)
Example 1:
Input: [8,5,1,7,10,12]
Output: [8,5,10,1,7,null,12]
Note:
1 <= preorder.length <= 100
The values
"""
# Definition for a binary tree node.
class TreeNode:
    """Binary tree node with a value and left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None


# Time: O(n), space: O(n)
class Solution:
    # Kept for backward compatibility with code that inspected it; the
    # traversal below no longer uses class-level state.  (The original
    # mutated Solution.index, which made concurrent/interleaved calls
    # unsafe and required a manual reset after every call.)
    index = 0

    def bstFromPreorder(self, preorder) -> TreeNode:
        """Rebuild the BST whose preorder traversal is *preorder*.

        Each value is consumed exactly once: a node is created only when
        the next preorder value lies inside the (minimum, maximum) range
        valid for the current subtree, making reconstruction O(n).
        Returns None for an empty input.
        """
        n = len(preorder)
        pos = 0  # cursor into preorder, shared via nonlocal

        def reconstruct(minimum, maximum):
            nonlocal pos
            # All values consumed, or the next value belongs to an
            # ancestor's subtree -> no node here.
            if pos >= n:
                return None
            value = preorder[pos]
            if not (minimum < value < maximum):
                return None
            pos += 1
            node = TreeNode(value)
            # Left subtree holds values below the root, right holds above.
            node.left = reconstruct(minimum, value)
            node.right = reconstruct(value, maximum)
            return node

        return reconstruct(float("-inf"), float("inf"))
def preorder(root):
    """Print the tree's values in preorder, space-separated, no newline."""
    stack = [root]
    while stack:
        node = stack.pop()
        if node is None:
            continue
        print(node.val, end=" ")
        # Push right first so the left subtree is visited (printed) first.
        stack.append(node.right)
        stack.append(node.left)
sol = Solution()
# Test 1
root = sol.bstFromPreorder([8,5,1,7,10,12])
preorder(root)
|
[
"nenadpantelickg@gmail.com"
] |
nenadpantelickg@gmail.com
|
a20a45a37faa268905d98259d366dd9792eac8a8
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-eg/huaweicloudsdkeg/v1/model/update_channel_request.py
|
16e1db4ed57fc9808a3041c879fafe6b983edd17
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 3,925
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateChannelRequest:
    """Request wrapper for the event-channel update API (auto-generated model).

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'channel_id': 'str',
        'body': 'ChannelUpdateReq'
    }

    attribute_map = {
        'channel_id': 'channel_id',
        'body': 'body'
    }

    def __init__(self, channel_id=None, body=None):
        """UpdateChannelRequest

        The model defined in huaweicloud sdk

        :param channel_id: ID of the event channel to update
        :type channel_id: str
        :param body: Body of the UpdateChannelRequest
        :type body: :class:`huaweicloudsdkeg.v1.ChannelUpdateReq`
        """
        self._channel_id = None
        self._body = None
        self.discriminator = None

        self.channel_id = channel_id
        if body is not None:
            self.body = body

    @property
    def channel_id(self):
        """Gets the channel_id of this UpdateChannelRequest.

        ID of the event channel to update

        :return: The channel_id of this UpdateChannelRequest.
        :rtype: str
        """
        return self._channel_id

    @channel_id.setter
    def channel_id(self, channel_id):
        """Sets the channel_id of this UpdateChannelRequest.

        ID of the event channel to update

        :param channel_id: The channel_id of this UpdateChannelRequest.
        :type channel_id: str
        """
        self._channel_id = channel_id

    @property
    def body(self):
        """Gets the body of this UpdateChannelRequest.

        :return: The body of this UpdateChannelRequest.
        :rtype: :class:`huaweicloudsdkeg.v1.ChannelUpdateReq`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this UpdateChannelRequest.

        :param body: The body of this UpdateChannelRequest.
        :type body: :class:`huaweicloudsdkeg.v1.ChannelUpdateReq`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask sensitive attributes instead of exposing raw values.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UpdateChannelRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
eb146857efd243b4b309e66308a85ae8576f3360
|
71324aca11e16d6da17b0440e72d0107f5af6e04
|
/ptt_blog/blog/models.py
|
aa4bd122f7bcb84f921c94e8680a92d716cb437f
|
[
"MIT"
] |
permissive
|
n3k0fi5t/Django_Tutorial
|
6bad82a919d1de0162b34f4c7f753cd126b05cc3
|
e3953335ca88fe22c68268fd76afb7c4f9bbb55f
|
refs/heads/master
| 2023-02-16T07:56:56.416031
| 2021-01-11T23:17:33
| 2021-01-11T23:17:33
| 291,436,266
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 537
|
py
|
from django.db import models
# Create your models here.
class Post(models.Model):
    """A board post with a title, body text and creation timestamp."""
    title = models.CharField(max_length=50)
    content = models.TextField()
    # Set once when the row is first created.
    date = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Bug fix: the Django Meta option is spelled ``ordering`` -- the
        # original ``ordered`` is not a valid Meta attribute, so posts were
        # not returned newest-first as intended.
        ordering = ['-date', ]
class Push(models.Model):
    # A comment ("push") attached to a Post; deleted along with its post.
    pusher = models.CharField(max_length=50)
    content = models.TextField()
    post = models.ForeignKey(Post, on_delete=models.CASCADE)
class PostImage(models.Model):
    # An image URL that can be shared by several posts (many-to-many).
    url = models.TextField(null=False)
    post = models.ManyToManyField(Post)
|
[
"r05922078@ntu.edu.tw"
] |
r05922078@ntu.edu.tw
|
55f4af44562a8fc8d1b2bddb29a09dc31b69a781
|
4913fb7fd32c3dd0da53af7a012569ec2254b35a
|
/86.继承.py
|
2b60c2224b77d2ecd852301be1d17e7343045f9e
|
[] |
no_license
|
puhaoran12/python_note
|
8a21954050ba3126f2ef6d5d1e4a2904df954b9b
|
b807e7b7dd90c87cee606f50421400c8f3d0ba03
|
refs/heads/master
| 2023-07-07T20:20:04.546541
| 2021-08-21T02:17:12
| 2021-08-21T02:17:12
| 398,439,725
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 793
|
py
|
# Inheritance
# Syntax:
# class SubclassName(Parent1, Parent2...):
#     pass
# A class with no explicit base implicitly inherits from object.
# A subclass's constructor must call the parent class's constructor.
class Person(object):
    def __init__(self,name,age):
        self.name=name
        self.age=age

    def info(self):
        # Prints "name: {0}, age: {1}" (output intentionally left in Chinese).
        print('姓名:{0},年龄:{1}'.format(self.name,self.age))


class Student(Person):
    def __init__(self,name,age,score):
        # Delegate name/age initialization to Person.
        super().__init__(name,age)
        self.score=score


class Teacher(Person):
    def __init__(self,name,age,teacheryear):
        super().__init__(name,age)
        self.teacheryear=teacheryear


stu=Student('张三',20,100)
stu.info()
print(stu.score)
tea=Teacher('李四',50,28)
tea.info()
print(tea.teacheryear)
|
[
"276191374@qq.com"
] |
276191374@qq.com
|
5fd4bd2fcda784801ee0548c57a0339b09041fa4
|
76f1331d083d360fb3822312537e72d4ff9d50b5
|
/keywords_extraction/multilanguage/util/file_utils.py
|
654041f49873d8e758e9dc718089da91ad352125
|
[] |
no_license
|
ZouJoshua/ml_project
|
2fe0efee49aa1454b04cd83c61455232601720a6
|
b1d8eb050182cd782bc6f3bb3ac1429fe22ab7b7
|
refs/heads/master
| 2021-07-22T10:37:56.452484
| 2020-05-09T09:54:39
| 2020-05-09T09:54:39
| 158,562,016
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 696
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Author : Joshua
@Time : 12/5/19 1:47 PM
@File : file_utils.py
@Desc :
"""
from keywords_extraction.multilanguage.core.language import Language
def writeLines(filePath, lines):
    """Write each string in *lines* to *filePath* (UTF-8), one per line."""
    # Bug fix: use a context manager so the file is closed even if a
    # write raises; the original leaked the handle on error.
    with open(filePath, 'w', encoding='utf-8') as writer:
        for line in lines:
            writer.write(line + '\n')
def readLines(filePath):
    """Read *filePath* (UTF-8) and return its lines, whitespace-stripped."""
    # Bug fix: the original never closed the file; a context manager
    # releases the handle deterministically.
    with open(filePath, 'r', encoding='utf-8') as reader:
        return [line.strip() for line in reader.readlines()]
def readLanguages(filePath):
    # Each line is two tab-separated fields used to build a Language.
    # NOTE(review): a line with fewer than two fields raises IndexError;
    # extra fields beyond the second are silently ignored.
    return [Language(kv[0], kv[1]) for kv in [line.split('\t') for line in readLines(filePath)]]
def readStopwords(filePath):
    # One stopword per line; a set gives O(1) membership tests.
    return set(readLines(filePath))
|
[
"joshua_zou@163.com"
] |
joshua_zou@163.com
|
437324b82fcec902626d1fcec0186f7e95d6bed7
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02270/s773469916.py
|
da9d345dfc9c9101bb74b1f5e3021c1dd24ec32e
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 521
|
py
|
import math
# Split the n values into at most k contiguous groups so that the largest
# group sum is minimized; binary search that minimum over [left, right].
n, k = map(int, input().split())
num = [int(input()) for i in range(n)]
#num.sort()
# Lower bound: no group can be smaller than the largest single value,
# nor than the average load ceil(total/k).  Upper bound: everything in
# one group.
left = max(max(num), math.ceil(sum(num)/k))
right = sum(num)
while left < right:
    mid = (left + right) // 2
    # Greedy feasibility check: pack values left-to-right, opening a new
    # group whenever the running sum would exceed mid.
    track = 1  # groups opened so far
    cnt = 0    # sum of the current group
    flag = 0   # set to 1 when more than k groups are required
    for i in num:
        cnt += i
        if cnt > mid:
            track+=1
            cnt = i
            if track > k:
                flag = 1
                break
        # NOTE(review): redundant -- the inner break above already exits.
        if flag:
            break
    if flag == 0:
        # mid is feasible; tighten the upper bound.
        right = mid
    else:
        left = mid+1
print(left)
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
b6c3622f32e6ca37c49c9b38c97cdb597eb83ca8
|
d9c0a55dfc3a87e4166f3ee73d60d302730278d1
|
/board/urls.py
|
2215b2f3120742acd4a673f42981a768205a51a5
|
[] |
no_license
|
mzazakeith/codeboard
|
358872794b1b2c35886bde8c26631e7d4877883c
|
f73b9a303d6ab5984c4ce73684e9ed34ceff4fe4
|
refs/heads/development
| 2020-03-25T15:59:23.795795
| 2018-08-10T07:20:36
| 2018-08-10T07:20:36
| 143,910,011
| 0
| 0
| null | 2018-08-10T07:20:37
| 2018-08-07T18:07:48
|
Python
|
UTF-8
|
Python
| false
| false
| 625
|
py
|
from django.conf.urls import url, include
from . import views
# URL routes for the board app; the ``name`` values are used by reverse()
# and {% url %} lookups in templates.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^home$', views.home, name='home'),
    url(r'^new-service$', views.new_service, name='new-service'),
    # Routes below capture a numeric id from the path.
    url(r'^userprofile/(?P<user_id>\d+)', views.userprofile, name='profile'),
    url(r'^forum$', views.forum, name='forum'),
    url(r'^comment/(?P<topic_id>\d+)', views.comment, name='comment'),
    url(r'^read/(?P<msg_id>\d+)', views.read, name='read'),
    url(r'^rate/(?P<user_id>\d+)', views.rate, name='rate'),
    url(r'^all-services/', views.get_services, name='all-services')
]
|
[
"mzazakeith@gmail.com"
] |
mzazakeith@gmail.com
|
5d43aaf753ce4267514d56d9a4f3b9e3b905f9c2
|
3d989666e6ceb2abc9175dcf7b1d0c1f8c76d205
|
/py_solution/p119_yanghui_ii.py
|
610dda33ec40d7516cd4cee381295e6206749b63
|
[] |
no_license
|
dengshilong/leetcode
|
00ae0898b4645efd1de69a13f2fa92606e899297
|
5ab258f04771db37a3beb3cb0c490a06183f7b51
|
refs/heads/master
| 2021-01-10T11:58:10.396399
| 2020-04-10T12:10:54
| 2020-04-10T12:10:54
| 47,912,974
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 576
|
py
|
class Solution(object):
    def getRow(self, rowIndex):
        """Return row *rowIndex* (0-based) of Pascal's triangle.

        :type rowIndex: int
        :rtype: List[int]

        Builds the row in place, right-to-left, so only O(rowIndex)
        extra space is used: res[j-1] still holds the previous row's
        value when res[j] is updated.
        """
        res = [0] * (rowIndex + 1)
        res[0] = 1
        for i in range(1, rowIndex + 1):
            for j in range(i, 0, -1):
                if j == i:
                    res[j] = 1
                else:
                    # Bug/cleanup: removed the original unreachable
                    # ``elif j == 0`` branch -- range(i, 0, -1) stops at 1,
                    # so j is never 0 here.
                    res[j] = res[j] + res[j - 1]
        return res


if __name__ == "__main__":
    solution = Solution()
    assert solution.getRow(3) == [1, 3, 3, 1]
|
[
"dengshilong1988@gmail.com"
] |
dengshilong1988@gmail.com
|
0e26994af468ecff3062e2111115bd78d92a4d34
|
2869eb01810389ab7b64355ec189800e7b1d49b9
|
/picoCTF 2021/Cryptography/ddes/ddes.py
|
d417d6774654db27ccb697bcb42c0ffd37d83515
|
[] |
no_license
|
Giantpizzahead/ctf-archive
|
096b1673296510bddef9f284700ebdb0e76d71a7
|
5063cd2889cd300aade440429faf9c4ca68511ef
|
refs/heads/master
| 2021-12-28T14:23:33.598960
| 2021-12-20T17:23:55
| 2021-12-20T17:23:55
| 252,905,854
| 1
| 0
| null | 2021-12-20T17:14:05
| 2020-04-04T04:11:24
|
C
|
UTF-8
|
Python
| false
| false
| 1,023
|
py
|
#!/usr/bin/python3 -u
from Crypto.Cipher import DES
import binascii
import itertools
import random
import string
def pad(msg):
    """Right-pad *msg* with spaces to a multiple of the 8-byte DES block.

    Note that a message whose length is already a multiple of 8 gains a
    full extra block of 8 spaces (8 - 0 == 8), as in the original.
    """
    remainder = len(msg) % 8
    return (msg + " " * (8 - remainder)).encode()
def generate_key():
    # 6 random digits padded to one 8-byte DES block: only 10**6 possible
    # keys, which is what makes this "double DES" challenge brute-forceable.
    return pad("".join(random.choice(string.digits) for _ in range(6)))
FLAG = open("flag").read().rstrip()
KEY1 = generate_key()
KEY2 = generate_key()
def get_input():
    # Read a hex string from the player and decode it to text; any
    # malformed input (or EOF) terminates the process via exit(0), so the
    # ``res = None`` assignment is effectively dead.
    try:
        res = binascii.unhexlify(input("What data would you like to encrypt? ").rstrip()).decode()
    except:
        res = None
        exit(0)
    return res
def double_encrypt(m):
    # DES(KEY2, DES(KEY1, pad(m))) in ECB mode, hex-encoded.  Two fixed
    # keys chosen at startup; the oracle below lets the player encrypt
    # arbitrary plaintexts under them.
    msg = pad(m)
    cipher1 = DES.new(KEY1, DES.MODE_ECB)
    enc_msg = cipher1.encrypt(msg)
    cipher2 = DES.new(KEY2, DES.MODE_ECB)
    return binascii.hexlify(cipher2.encrypt(enc_msg)).decode()
# Print the encrypted flag, then serve an encryption oracle forever:
# the player may encrypt chosen plaintexts under the same two keys.
print("Here is the flag:")
print(double_encrypt(FLAG))
while True:
    inputs = get_input()
    if inputs:
        print(double_encrypt(inputs))
    else:
        # Unreachable in practice: get_input() exits on bad input.
        print("Invalid input.")
|
[
"43867185+Giantpizzahead@users.noreply.github.com"
] |
43867185+Giantpizzahead@users.noreply.github.com
|
b1eb29ee864eccdeb3ab8cfa998a45eb742f03e2
|
44bbfe1c9a7f16e632cdd27c2de058033b33ea6d
|
/mayan/apps/document_indexing/migrations/0013_auto_20170714_2133.py
|
6ccaeb6689a30d2b114e6f833b74cc71243d8372
|
[
"Apache-2.0",
"ISC",
"MIT"
] |
permissive
|
lxny2004/open-paperless
|
34025c3e8ac7b4236b0d8fc5ca27fc11d50869bc
|
a8b45f8f0ee5d7a1b9afca5291c6bfaae3db8280
|
refs/heads/master
| 2020-04-27T04:46:25.992405
| 2019-03-06T03:30:15
| 2019-03-06T03:30:15
| 174,064,366
| 0
| 0
|
NOASSERTION
| 2019-03-06T03:29:20
| 2019-03-06T03:29:20
| null |
UTF-8
|
Python
| false
| false
| 480
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-07-14 21:33
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: set default ordering and verbose names on the Index model."""

    dependencies = [
        ('document_indexing', '0012_auto_20170530_0728'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='index',
            options={'ordering': ('label',), 'verbose_name': 'Index', 'verbose_name_plural': 'Indexes'},
        ),
    ]
|
[
"littlezhoubear@gmail.com"
] |
littlezhoubear@gmail.com
|
4bec73da24b04e8ce1af04ca95cda3cfc8369b6e
|
9dab41a71bf19a9ad17ee3e9f77c0f58aebd1d6d
|
/python/uline/uline/uline/handlers/api/risk/urls.py
|
1cf460fe48872c54f9c6f0ee70bdc2f31883a7c4
|
[] |
no_license
|
apollowesley/Demo
|
f0ef8ec6c4ceb0aec76771da8dd9a62fb579eac8
|
471c4af95d3a7222d6933afc571a8e52e8fe4aee
|
refs/heads/master
| 2021-02-15T04:01:51.590697
| 2018-01-29T01:44:29
| 2018-01-29T01:44:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 241
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from tornado.web import URLSpec as url
from uline.handlers.api.risk.alipay_risk import AlipayTransactionRiskNotify
# URL prefix: /api/fee/
urls = [
    # Alipay transaction-risk notification callback endpoint.
    url(r'/alipay', AlipayTransactionRiskNotify)
]
|
[
"36821277@qq.com"
] |
36821277@qq.com
|
50b551d144ecadac213a7c075f1701bc50f5ea45
|
e64b6966665a0964e382953a96df1ebe1a41cf10
|
/0001-0100/0096-Unique Binary Search Trees/0096-Unique Binary Search Trees.py
|
277969006a390820311ba754da3b175119ed218e
|
[
"MIT"
] |
permissive
|
deepbas/LeetCode
|
8dfbb2b0b88b32c01033e6eabd8a3641c9a57083
|
a93f907f03cb3861e6858370f57129e01563fe5a
|
refs/heads/master
| 2020-08-01T14:14:38.649057
| 2019-09-09T01:51:42
| 2019-09-09T01:51:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
py
|
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs storing values 1..n.

        Dynamic programming over the Catalan recurrence: a tree of
        ``total`` nodes chooses each value as root, multiplying the
        counts of the possible left and right subtrees.
        """
        counts = [1] + [0] * n
        for total in range(1, n + 1):
            counts[total] = sum(
                counts[left] * counts[total - 1 - left] for left in range(total)
            )
        return counts[n]
|
[
"jiadaizhao@gmail.com"
] |
jiadaizhao@gmail.com
|
412857054cb500dfa2b27027186cf74f4f227492
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20180525/example_structlog/fixedlog/__init__.py
|
f62cd854d6508bd55d2fb0d39b93de3b120d5ff5
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 340
|
py
|
import structlog
def get_logger(name, *args, **kwargs):
    """Return a structlog logger bound with ``source=name``.

    Extra *args/**kwargs are accepted for call-site compatibility but
    are ignored by this implementation.
    """
    logger = structlog.get_logger(name, source=name)
    return logger
DEFAULT_PROCESSORS = [
structlog.processors.JSONRenderer(),
]
def setup(*args, **kwargs):
    """Configure structlog, defaulting ``processors`` to DEFAULT_PROCESSORS."""
    if "processors" not in kwargs:
        kwargs["processors"] = DEFAULT_PROCESSORS
    structlog.configure(*args, **kwargs)
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
bb02b542b17a538bf4ce6df589bf32f9432df22d
|
010c5fbc97731286be00028ff33fc981d943bca3
|
/primal/src/code/impute/impute/dev/plot_segments.py
|
230cd92d5c372b17af8ef68bb5ce21fcfd9e04d8
|
[] |
no_license
|
orenlivne/ober
|
6ce41e0f75d3a8baebc53e28d7f6ae4aeb645f30
|
810b16b2611f32c191182042240851152784edea
|
refs/heads/master
| 2021-01-23T13:48:49.172653
| 2014-04-03T13:57:44
| 2014-04-03T13:57:44
| 6,902,212
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 347
|
py
|
'''
Created on Jan 31, 2013
@author: oren
'''
import sys, matplotlib.pylab as P, numpy as np
if __name__ == '__main__':
    # Load the segment table named by the first CLI argument.
    # Assumes columns 2 and 3 hold segment start/end in base pairs -- TODO confirm.
    s = np.loadtxt(sys.argv[1])
    P.figure(1)
    P.clf()
    # Histogram of segment lengths, converted from bp to megabase pairs.
    P.hist((s[:, 3] - s[:, 2]) / 1e6, 50)
    P.xlabel('Length [Mbp]')
    P.ylabel('Frequency')
    P.title('IBD Segment Length Distribution in the Hutterites')
|
[
"oren.livne@gmail.com"
] |
oren.livne@gmail.com
|
2c797dafb38c53aaf146fd45cba67212fe584319
|
dacb2bba2c91877c5157ccb8ab34e112abfea0ee
|
/projects/project_12/src/navigation/scripts/waypoint_logger.py
|
15828bacbbada3e4a83c3efa3875b7e5f4697bdd
|
[] |
no_license
|
amuamushu/projects-2020-2021
|
7fd4e29a8f51406ded59a97cd878a5752ffc700b
|
f1c385e46d2d5475b28dec91b57a933ac81c23c5
|
refs/heads/main
| 2023-04-01T11:47:55.935278
| 2021-03-28T00:37:29
| 2021-03-28T00:37:29
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,538
|
py
|
#!/usr/bin/env python
import pandas as pd
import rospy
from nav_msgs.msg import Odometry
class WaypointLogger:
    """ROS helper: records odometry poses and periodically dumps them to CSV."""

    def __init__(self):
        # Output CSV path and odometry topic come from private ROS params.
        self.output_name = rospy.get_param("~wp_log_output", "saved_waypoints.csv")
        self.odom_topic = rospy.get_param("~odom_topic", "/vehicle/odom")
        # save waypoints from odometry in csv
        self.wps = []
        self.current_count = 0
        # Dump the accumulated waypoints every `logging_time` messages.
        self.logging_time = 100
        rospy.Subscriber(self.odom_topic, Odometry, self.odom_callback)
        rospy.loginfo("Started Odometry")

    def odom_callback(self, data):
        """Record position + orientation quaternion of each Odometry message."""
        waypoint = (data.pose.pose.position.x,
                    data.pose.pose.position.y,
                    data.pose.pose.position.z,
                    data.pose.pose.orientation.x,
                    data.pose.pose.orientation.y,
                    data.pose.pose.orientation.z,
                    data.pose.pose.orientation.w)
        self.wps.append(waypoint)
        # Save waypoint logs every self.logging_time iterations
        # NOTE(review): the counter starts at 0, so the very first message
        # also triggers a save; logerr is used for an informational message.
        if self.current_count % self.logging_time == 0:
            rospy.logerr(f"saving odometry logs to {self.output_name}")
            self.df_logs = pd.DataFrame(self.wps, columns=["x", "y", "z", "qx", "qy", "qz", "qw"])
            self.df_logs.to_csv(self.output_name, index=False)
        self.current_count += 1
if __name__ == '__main__':
    # Standard ROS node boilerplate: init, construct the logger, spin until shutdown.
    try:
        rospy.init_node('waypoint_logger', anonymous=True)
        WaypointLogger()
        rospy.spin()
    except rospy.ROSInterruptException:
        pass
|
[
"aaron.fraenkel@gmail.com"
] |
aaron.fraenkel@gmail.com
|
2d38b298243ab9608a59b9b5c60b83576b6a368b
|
25ebc03b92df764ff0a6c70c14c2848a49fe1b0b
|
/daily/20200414/codes/output/code111.py
|
e947c90eb401915b8fe4acb6b8e2111b71fd0060
|
[] |
no_license
|
podhmo/individual-sandbox
|
18db414fafd061568d0d5e993b8f8069867dfcfb
|
cafee43b4cf51a321f4e2c3f9949ac53eece4b15
|
refs/heads/master
| 2023-07-23T07:06:57.944539
| 2023-07-09T11:45:53
| 2023-07-09T11:45:53
| 61,940,197
| 6
| 0
| null | 2022-10-19T05:01:17
| 2016-06-25T11:27:04
|
Python
|
UTF-8
|
Python
| false
| false
| 410
|
py
|
import pygal
# Box plot of V8 benchmark scores per browser; box_mode="pstdev" selects
# pygal's population-standard-deviation whisker mode.
box_plot = pygal.Box(box_mode="pstdev")
box_plot.title = 'V8 benchmark results'
box_plot.add('Chrome', [6395, 8212, 7520, 7218, 12464, 1660, 2123, 8607])
box_plot.add('Firefox', [7473, 8099, 11700, 2651, 6361, 1044, 3797, 9450])
box_plot.add('Opera', [3472, 2933, 4203, 5229, 5810, 1828, 9013, 4669])
box_plot.add('IE', [43, 41, 59, 79, 144, 136, 34, 102])
# Render the SVG markup to stdout rather than writing a file.
print(box_plot.render(is_unicode=True))
|
[
"ababjam61+github@gmail.com"
] |
ababjam61+github@gmail.com
|
afb9fc8593fbbc1f50b77535b5f02b2d020e9683
|
fce8a56e09739bad6b0953fc3d890292bb2b7f31
|
/RedditScraperService/FileWriting.py
|
30dcc3aaadff30e3bffd6fc5040aac23ee460864
|
[] |
no_license
|
whorst/WsbInvesting
|
d69f84af2e9bd81560ee8b88d7e882f022350aae
|
e48ea1a2f3434ab3084746ecce4012c656800280
|
refs/heads/master
| 2022-11-26T00:23:24.887989
| 2020-08-06T03:04:16
| 2020-08-06T03:04:16
| 284,287,474
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 623
|
py
|
def writeRidiculouslyHighOrLowToFile(comment):
    """Append *comment* (followed by a blank separator line) to the
    high/low-reason log file.

    Uses a context manager so the handle is closed even if a write raises
    (the original leaked the handle on error).
    """
    with open("resources/files/ridiculouslyHighOrLowReason", "a") as outFile:
        outFile.write(comment)
        outFile.write("\n\n")
def writeClosePositionFailureToFile(msg):
    """Append a close-position failure message (plus a blank separator line)
    to its log file, closing the handle even on error."""
    with open("resources/files/closePositionFailure", "a") as outFile:
        outFile.write(msg + "\n\n")
def writeValidPositionsToFile(comment, newPosition):
    """Append *comment* and the string form of *newPosition* to the
    comment log, separated by blank lines.

    Uses a context manager (no leaked handle on error) and str() instead
    of calling __str__ directly.
    """
    with open("resources/files/commentFileOut", "a") as outFile:
        outFile.write("\n\n")
        outFile.write(comment)
        outFile.write("\n")
        outFile.write(str(newPosition))
        outFile.write("\n\n")
|
[
"you@example.com"
] |
you@example.com
|
8b9a6b1fa210fa7d31ae50de5904a35240c8326d
|
7c8da5b1fa05c6d6799aa3d0aef0fadb215f04db
|
/example/vis.py
|
97ff149c6340e0ad47c5b8a30f5d81210c343df5
|
[] |
no_license
|
jackd/tiny_imagenet
|
a71a7b2a3b328dd6e2a06ec37cf4da365b837d32
|
1669940c8ec6ecc6d38a7094b4a00a7182020603
|
refs/heads/master
| 2020-04-12T08:29:34.255095
| 2018-12-19T05:12:05
| 2018-12-19T05:12:05
| 162,386,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 821
|
py
|
#!/usr/bin/python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from tiny_imagenet.manager import TinyImagenetManager, N_VAL_EXAMPLES
man = TinyImagenetManager()
print(man.extract_dir)
ids = man.load_wordnet_ids()
# Map each wordnet id to its class index (order defines the index).
indices = {wnid: i for i, wnid in enumerate(ids)}
val_ids, bboxes = man.load_val_annotations()
wnids = man.load_wordnet_ids()
words = man.load_words()
# Human-readable label per class index.
class_words = [words[wnid] for wnid in wnids]
val_indices = [indices[i] for i in val_ids]
# Show each validation image titled with its class index and label;
# plt.show() blocks until the window is closed, one image at a time.
for i in range(N_VAL_EXAMPLES):
    image = Image.open(man.val_image_path(i))
    class_index = val_indices[i]
    plt.imshow(np.array(image))
    plt.title('%d: %s' % (class_index, class_words[class_index]))
    plt.show()
|
[
"thedomjack@gmail.com"
] |
thedomjack@gmail.com
|
ad13fe4d4034323194ccb0aa7d343a294e231820
|
7bb9bd2bdadef1590b2ef7ff309e08abf454e49d
|
/Tests/pydeijao.py
|
1c0590f6fee34dceac4db08a964cc1f5fc772b52
|
[] |
no_license
|
ALREstevam/Curso-de-Python-e-Programacao-com-Python
|
afdf12717a710f20d4513d5df375ba63ba1e1c19
|
af6227376736e63810e5979be54eb1c433d669ac
|
refs/heads/master
| 2021-09-07T12:11:17.158298
| 2018-02-22T17:47:19
| 2018-02-22T17:47:19
| 87,453,286
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 999
|
py
|
import requests as Rq
from bs4 import BeautifulSoup as Bs
# Fetch the Unicamp restaurant menu page.
link = 'http://www.pfl.unicamp.br/Rest/view/site/cardapio.php'
page = Rq.get(link)
# NOTE(review): the first parse (with from_encoding) is immediately
# discarded by the second plain parse -- probably only one was intended.
soup = Bs(page.content, 'html.parser', from_encoding='iso-8859-1')
soup = Bs(page.content, 'html.parser')
def formatInput(text):
    """Normalize scraped menu text: drop CR/LF, tighten the ': ' and
    ' - ' separators, and strip surrounding whitespace."""
    for old, new in (("\n", ""), ("\r", ""), (": ", ":"), (" - ", "-")):
        text = text.replace(old, new)
    return text.strip()
# Collect the header row plus rows 4-10 of the menu table, normalized.
html = []
html.append(formatInput(soup.find_all('tr')[0].getText()))
for i in range(4, 11):
    html.append(formatInput(soup.find_all('tr')[i].getText()))
print('\n\n')
# Header line is "TITLE-date"; print each half centered.
print('{:^50s}'.format(html[0].split('-')[0].upper()))
print()
print('{:^50s}'.format(html[0].split('-')[1]))
print('-'*57)
broke = []
for elem in html[2:11]:
    broke.append(elem.split(':'))
# NOTE(review): this loop is a no-op -- it rebinds the local `elem`
# without writing the result back into `broke`.
for tup in broke:
    for elem in tup:
        elem = formatInput(elem)
# Print each "label: value" pair as a fixed-width table row.
for tup in broke:
    print('| {:20s} | {:<30s} |'.format(tup[0], tup[1].lower()))
print('-'*57)
input()
|
[
"a166348@g.unicamp.com"
] |
a166348@g.unicamp.com
|
ad841ea0fa5e33e9fa357d47590200fdfde6347c
|
dadd814aceb7ad6698107dea474f92855f79ba51
|
/ReplicatedStochasticGradientDescent/rsgd/ReplicatedStochasticGradientDescent.py
|
978541c75ccda28c54978f4ee9d18d6fed0ffb8a
|
[
"MIT"
] |
permissive
|
Nico-Curti/rSGD
|
16e41524be2dd8d4988a5ecd368d3ac72d072ffe
|
b1f72c06a7f68c04fc97aaeae45d75852b541d42
|
refs/heads/master
| 2020-05-20T15:18:33.520000
| 2019-10-01T14:00:40
| 2019-10-01T14:00:40
| 185,641,977
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,828
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import division
import pickle
import pandas as pd
import numpy as np
import warnings
from scipy.special import erf
from .Patterns import Pattern
from .misc import _check_string
from lib.ReplicatedStochasticGradientDescent.rSGD import _rsgd
from lib.ReplicatedStochasticGradientDescent.rSGD import _predict
import multiprocessing
__package__ = "ReplicatedStochasticGradientDescent"
__author__ = ["Nico Curti (nico.curit2@unibo.it)", "Daniele Dall'Olio (daniele.dallolio@studio.unibo.it)"]
NTH = multiprocessing.cpu_count()
class ReplicatedStochasticGradientDescent():
    """Replicated Stochastic Gradient Descent classifier.

    Thin scikit-learn-style wrapper (``fit`` / ``predict``) around the
    compiled ``_rsgd`` / ``_predict`` bindings, with pickle/CSV weight
    persistence helpers.
    """

    def __init__(self, K=1, formula='simple', max_iter=1000, seed=135, init_equal=True, waitcenter=False, center=False):
        """Store hyper-parameters; no work happens until ``fit`` is called."""
        if formula not in ['simple', 'hard', 'continuous', 'corrected']:
            raise TypeError('Invalid iteration scheme. Allowed values are ["simple", "hard", "continuous", "corrected"]')

        self._K = K
        self._formula = formula
        self._max_iter = max_iter
        self._seed = seed
        self._init_equal = init_equal
        self._waitcenter = waitcenter
        self._center = center

        # Populated by fit() / load_weights().
        self._weights = None
        self._fit = False

    def predict(self, X):
        """
        Predict the new labels computed by ReplicatedStochasticGradientDescent model

        Parameters
        ----------
        X : array of shape [n_samples, n_features]
            The input samples.

        Returns
        -------
        y : array of shape [n_samples]
            The predicted target values.
        """
        if not self._fit:
            raise ValueError('ReplicatedStochasticGradientDescent classifier is not fitted yet. Please use the fit method before predict')
        if not self._weights:
            raise ValueError("Weights must be computed before predict function. Use 'fit' function or 'load_weights' to read them from file")

        # Already-wrapped inputs pass straight through to the C++ binding.
        testset = X if isinstance(X, Pattern) else Pattern(X, [])

        n_samples, _ = np.shape(X)
        return _predict(testset, self._weights.ravel().astype('int64'), n_samples)

    def fit(self, X, y=None, parameters={'y' : 1, 'eta': (2., 1.), 'lambda' : (.1, 1.), 'gamma' : (float('Inf'), .01) }, nth=NTH):
        """
        Fit the ReplicatedStochasticGradientDescent model meta-transformer

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            The training input samples.

        y : array-like, shape (n_samples,)
            The target values (integers that correspond to classes in
            classification, real numbers in regression).

        Returns
        -------
        self : object
            Returns self.
        """
        self._fit = False

        pattern = X if isinstance(X, Pattern) else Pattern(X, y)

        # Delegate the whole optimization to the compiled routine.
        self._weights = _rsgd(pattern=pattern.pattern,
                              K=self._K,
                              y=parameters['y'],
                              eta=parameters['eta'],
                              lamda=parameters['lambda'],
                              gamma=parameters['gamma'],
                              formula=self._formula,
                              seed=self._seed,
                              max_iter=self._max_iter,
                              init_equal=self._init_equal,
                              waitcenter=self._waitcenter,
                              center=self._center,
                              nth=nth
                              )

        self._fit = True
        return self

    def load_weights(self, weightfile, delimiter='\t', binary=False):
        """
        Load weights from file

        Parameters
        ----------
        weightfile : string
            Filename of weights

        delimiter : char
            Separator for ascii loading

        binary : bool
            Switch between binary and ascii loading style

        Returns
        -------
        self
        """
        if binary:
            with open(weightfile, 'rb') as fp:
                self._weights = pickle.load(fp)
        else:
            self._weights = pd.read_csv(weightfile, sep=delimiter, header=None).values.tolist()

        # Loaded weights are usable for prediction without re-fitting.
        self._fit = True
        return self

    def save_weights(self, weightfile, delimiter='\t', binary=False):
        """
        Dump the current weights to file

        Parameters
        ----------
        weightfile : string
            Filename to dump the weights

        delimiter : char
            Separator for ascii dump

        binary : bool
            Switch between binary and ascii dumping style
        """
        if binary:
            with open(weightfile, 'wb') as fp:
                pickle.dump(self._weights, fp)
        else:
            pd.DataFrame(self._weights).to_csv(weightfile, sep=delimiter, header=False, index=False)

    def __repr__(self):
        """Short '<ClassName Class>' representation."""
        return '<{} Class>'.format(type(self).__name__)
|
[
"nico.curti2@unibo.it"
] |
nico.curti2@unibo.it
|
5e9517f7cd2ef1665d41f8b905ff72df96c9955c
|
6e5ab77fee1fb4a0310213dd8c6dd8601828b1b9
|
/Algorithm/Swea/D3_10761.py
|
32b83f64fe69ec3ebfcdef0f23b68b5069769425
|
[] |
no_license
|
hongyong3/TIL
|
36d031c0da9e3e6db3eebb977bd3e12df00a849f
|
7f1492128e957a78fc95b255f4f7f2978161e471
|
refs/heads/master
| 2023-08-19T09:16:03.231757
| 2023-08-18T09:38:47
| 2023-08-18T09:38:47
| 162,100,258
| 1
| 0
| null | 2023-02-11T00:52:32
| 2018-12-17T08:42:42
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,755
|
py
|
import sys
sys.stdin = open("D3_10761_input.txt", "r")
def solve1(a, b, ad, bd):
    """Apparently abandoned helper; returns None.

    NOTE(review): never called below; incrementing the local ``ad`` has no
    effect on the caller, and ``b``/``bd`` are unused.
    """
    if a[0][2] > ad:
        ad += 1
# Appears to simulate two robots 'O' and 'B' stepping one cell per tick
# toward their queued button positions; a button is popped only on the tick
# its robot stands on it and it is that button's turn (lowest order k).
T = int(input())
for test_case in range(T):
    data = input().split()[1:]
    O, B, k = [], [], 1  # [k, name, distance]; order index, name, distance
    ODist, BDist = 1, 1
    ans = 0
    # Partition the button list per robot, tagging each with its order k.
    while data:
        if data[0] == 'B':
            B.append([k, data.pop(0), int(data.pop(0))])
        else:
            O.append([k, data.pop(0), int(data.pop(0))])
        k += 1
    # One tick per loop iteration; the robot whose button is next (smaller k)
    # may press, the other only moves toward its own next button.
    while O or B:
        if O and B:
            if O[0][0] > B[0][0]:
                if B[0][2] > BDist:
                    BDist += 1
                elif B[0][2] == BDist:
                    B.pop(0)
                else:
                    BDist -= 1
                if O[0][2] > ODist:
                    ODist += 1
                elif O[0][2] == ODist:
                    pass
                else:
                    ODist -= 1
            else:
                if O[0][2] > ODist:
                    ODist += 1
                elif O[0][2] == ODist:
                    O.pop(0)
                else:
                    ODist -= 1
                if B[0][2] > BDist:
                    BDist += 1
                elif B[0][2] == BDist:
                    pass
                else:
                    BDist -= 1
        elif O and not B:
            if O[0][2] > ODist:
                ODist += 1
            elif O[0][2] == ODist:
                O.pop(0)
            else:
                ODist -= 1
        else:
            if B[0][2] > BDist:
                BDist += 1
            elif B[0][2] == BDist:
                B.pop(0)
            else:
                BDist -= 1
        ans += 1
    print("#{} {}".format(test_case + 1, ans))
|
[
"chy66822495@gmail.com"
] |
chy66822495@gmail.com
|
027615d70d9fdcdf0d7bf877bb460b3469d5d748
|
046207f434966462fff55f634ba5a450d2208534
|
/APSS/hanoi.py
|
21b4fc20625f00db38af0fb461694eb31d1690ae
|
[] |
no_license
|
sungsikyang92/pythonStudy
|
e293e1ac8af443809f840ccee7052a8f57480b70
|
26522b5e232ccd9ab25c52122d254aa7249a8fdf
|
refs/heads/master
| 2023-07-04T16:58:40.318976
| 2021-08-04T02:00:27
| 2021-08-04T02:00:27
| 365,398,522
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,495
|
py
|
def hanoi_tower(n, start, end):
    """Print the moves solving Towers of Hanoi as "start end" pairs.

    Pegs are numbered 1-3, so the spare peg is ``6 - start - end``.
    """
    spare = 6 - start - end
    if n == 1:
        print(start, end)
        return
    hanoi_tower(n - 1, start, spare)  # clear n-1 discs onto the spare peg
    print(start, end)                 # move the largest disc
    hanoi_tower(n - 1, spare, end)    # restack n-1 discs onto the target
# Read the disc count, print the minimal move count (2^n - 1), then the moves.
n = int(input())
print(2 ** n - 1)
hanoi_tower(n, 1, 3)
# 하노이의 탑 # 입력: 옮기려는 원반의 갯수 n
# 옮길 원반이 현재 있는 출발점 기둥 from_pos
# 원반을 옮길 도착점 기둥 to_pos
# 옮기는 과정에서 사용할 보조 기둥 aux_pos
# 출력: 원반을 옮기는 순서
# def hanoi(n, from_pos, to_pos, aux_pos):
# if n == 1: # 원반 한 개를 옮기는 문제면 그냥 옮기면 됨
# print(from_pos, "->", to_pos)
# return
# # 원반 n - 1개를 aux_pos로 이동(to_pos를 보조 기둥으로)
# hanoi(n - 1, from_pos, aux_pos, to_pos)
# # 가장 큰 원반을 목적지로 이동
# print(from_pos, "->", to_pos)
# # aux_pos에 있는 원반 n-1개를 목적지로 이동(from_pos를 보조 기둥으로)
# hanoi(n - 1, aux_pos, to_pos, from_pos)
#
# print("n = 1")
# hanoi(1, 1, 3, 2) # 원반 한 개를 1번 기둥에서 3번 기둥으로 이동(2번을 보조 기둥으로)
# print("n = 2")
# hanoi(2, 1, 3, 2) # 원반 두 개를 1번 기둥에서 3번 기둥으로 이동(2번을 보조 기둥으로)
# print("n = 3")
# hanoi(3, 1, 3, 2) # 원반 세 개를 1번 기둥에서 3번 기둥으로 이동(2번을 보조 기둥으로)
|
[
"sungsik.yang92@gmail.com"
] |
sungsik.yang92@gmail.com
|
a0fc06bf78776729139fe2a1d000a1a3dc8067cd
|
6c49c40d35d485c6fa7f9b92358b0888751b1dbe
|
/data/QSO_CIV_catalogs/matchingHam17toKoz17.py
|
d1e772e4082e9901d45e7d1b9c9c7bb813dfb224
|
[] |
no_license
|
d80b2t/CIV_CLQs
|
7f8bfa954f29e9516ddb9ce80c59fffe9ce8235b
|
95ea176c8f5ab6ee0c19d42aab964ebfd8bd0ce8
|
refs/heads/master
| 2020-08-31T20:56:17.168339
| 2020-07-30T13:35:19
| 2020-07-30T13:35:19
| 218,782,627
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,064
|
py
|
import numpy as np
import pandas as pd
from pandas import DataFrame
from astropy.io import fits
from astropy.io import ascii
from astropy.table import Table
## Hamann et al. (2017) ERQ BOSS DR12 catalog
path = '/cos_pc19a_npr/data/ERQs/Hamann2017_CIVcatalog/'
filename = 'C4N5REWs_DR12v11_MNRAS.fits'
infile = path+filename
# Table data lives in the first FITS extension.
data_full = fits.open(infile)
Ham17_full = data_full[1].data

#with fits.open(infile) as data:
#    df_Ham17 = pd.DataFrame(data[0].data)

## knocking out a couple of objects with bad REW values
#Ham17 = Ham17_full[np.where( (Ham17['rew'] >0.) & (Ham17['rew'] < 10000.) )]
Ham17 = Ham17_full

## Kozlowski 2017, ApJS 228, 9. BOSS DR12 "Value Added" catalog
path = '/cos_pc19a_npr/programs/quasars/CIV_CLQs/data/QSO_CIV_catalogs/'
filename = 'SDSS-DR12Q-BH_extra.fits'
infile = path+filename
data_full = fits.open(infile)
Koz17_full = data_full[1].data
Koz17 = Koz17_full

## astropy Table to pandas DataFrame #2804
## https://github.com/astropy/astropy/issues/2804
Ham17_table = Table(Ham17)
Ham17_df = DataFrame(np.array(Ham17_table))
print('len(Ham17_df)', len(Ham17_df))

Koz17_table = Table(Koz17)
## Have to remove these '5 value' (multi-dimensional) columns in order for
## the DataFrame conversion to work.
Koz17_table.remove_column('PSFFLUX')
Koz17_table.remove_column('IVAR_PSFFLUX')
Koz17_table.remove_column('PSFMAG')
Koz17_table.remove_column('ERR_PSFMAG')

## Some nice DataFrame polish/manipulation
Koz17_df = DataFrame(np.array(Koz17_table))
print('len(Koz17_df)', len(Koz17_df))

## Column names are case sensitive! Align the join key across catalogs.
Koz17_df.rename(columns={'SDSS_NAME':'sdss_name'}, inplace=True)

## Testing on a wee bit of the DataFrame...
mini_merge = pd.merge(Ham17_df[0:100], Koz17_df[0:100], on="sdss_name")
The_DF = pd.merge(Ham17_df, Koz17_df, on="sdss_name")
## Write a short summary of the first matches out to a simple text file.
# 'with' guarantees the file is closed even if the loop raises.
with open("temp.txt", "w+") as file1:
    # for ii in range(len(Koz17)):   # if the full catalog is wanted!
    for ii in range(100):
        print(ii)
        # Hoist the name match so the catalog is scanned once per object,
        # not four times.
        match = np.where(Koz17['SDSS_NAME'][ii] == Ham17['SDSS_NAME'])
        name = Ham17[match]['SDSS_NAME']
        REW = Ham17[match]['rew']
        bal_flag_vi = Ham17[match]['bal_flag_vi']
        f1450 = Ham17[match]['f1450']
        if (len(name) > 0 and bal_flag_vi < 1):
            # BUGFIX: the original wrote the undefined names `we_name` /
            # `we_rew` here (NameError on the first match); write the
            # matched name and REW values instead.
            file1.write(str(ii) + str(Koz17['SDSS_NAME'][ii]) + str(name) + str(REW))
            file1.write(" {} {} {} {} {} {} {} {} {} \n".format(ii, name, Koz17['RA'][ii], Koz17['DEC'][ii],
                                                                #bal_flag_vi,
                                                                REW, f1450,
                                                                Koz17['L1350'][ii], Koz17['LBol'][ii],  #Koz17['eLBol'][ii],
                                                                Koz17['nEdd'][ii]))
|
[
"npross@lbl.gov"
] |
npross@lbl.gov
|
d372b40b45954581e27277427a4693ec3ac1125d
|
d92ce9a32bf20086e30701585a4e73c1f2469aff
|
/Programs/dsaenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__init__.py
|
0a8b952fb5bfc53a50be9c2858538e2322e08730
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
Prem-chouhan/fellowshipProgram_PremsinghChouhan
|
f61cf4407458f14ef7eb6d80effb25f9592d2552
|
33e6b57f6c75a80d8a3d1f868d379e85365a1336
|
refs/heads/master
| 2020-09-14T12:45:16.269268
| 2019-12-23T14:24:10
| 2019-12-23T14:24:10
| 223,128,906
| 0
| 1
| null | 2020-07-22T11:50:46
| 2019-11-21T08:47:28
|
Python
|
UTF-8
|
Python
| false
| false
| 519
|
py
|
# This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
from .__about__ import (
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
|
[
"antonyalexcm@gmail.com"
] |
antonyalexcm@gmail.com
|
8dcb09f3f8575bdcaa50a1cc60c11410132e1057
|
8b2e95525139765c5344cc7992203def31b4a300
|
/Python-PSdrone/DRONEtowple.py
|
1e7fde360af416ea7e17fcad4989e444ac0f22bb
|
[] |
no_license
|
tttienthinh/Drone
|
e84ff2b7dea32346e0717a9597a980e15a43543b
|
73e98a0e727128a6e290872befe3a42df3d45c7c
|
refs/heads/master
| 2022-04-21T20:16:26.169552
| 2020-04-10T19:31:20
| 2020-04-10T19:31:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
from ps_drone import *
from threading import Thread
class DroneTowple(Thread):
    """Thread wrapper around a ps_drone Drone: boots the drone, subscribes
    to nav-data packages, then lands when the user presses Enter."""

    def __init__(self):
        self.drone = Drone()                                # Start using drone
        self.drone.startup()                                # Connects to drone and starts subprocesses
        self.drone.reset()                                  # Always good, at start
        # Busy-wait (0.1 s polls) until the battery reading becomes valid.
        while self.drone.getBattery()[0] == -1: time.sleep(0.1) # Waits until the drone has done its reset
        # (French: "here, the names of the packages")
        ''' ICI LE NOM DES PACKAGES '''
        self.packages = ['altitude', 'demo', 'pressure_raw', 'wind_speed', 'pwm']
        self.drone.setConfig("control vz max", "0.04")
        self.drone.useDemoMode(False)
        self.drone.getNDpackage(self.packages)
        Thread.__init__(self)
        self.Quit = False
        self.start()  # thread launches itself at the end of construction

    def run(self):
        """Block on stdin; on Enter, flag Quit and land the drone.
        (Prompt is French for "Press Enter to quit".)"""
        input('Entre Enter pour quitter')
        self.Quit = True
        self.drone.land()

    def NavData(self):
        """Return the drone's latest navigation-data snapshot."""
        navData = self.drone.NavData
        return navData
|
[
"tranthuongtienthinh@gmail.com"
] |
tranthuongtienthinh@gmail.com
|
f41bc5dcd8f481d090853be8b87143d2ea9ff3f8
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/JBkfqYW4iYwmgvwTf_5.py
|
582e98c416631fbc83dbdb5cd9b9d88f03ac5c67
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 695
|
py
|
"""
Create a function that returns `True` if a number is prime, and `False`
otherwise. A prime number is any positive integer that is evenly divisible by
only two divisors: 1 and itself.
The first ten prime numbers are:
2, 3, 5, 7, 11, 13, 17, 19, 23, 29
### Examples
is_prime(31) ➞ True
is_prime(18) ➞ False
is_prime(11) ➞ True
### Notes
* A prime number has no other factors except 1 and itself.
* If a number is odd it is not divisible by an even number.
* 1 is not considered a prime number.
"""
def is_prime(num):
    """Return True when *num* is prime, False otherwise.

    Fixes two defects of the naive version: values below 2 (0, 1 and
    negatives) are now correctly rejected (the old loop over an empty
    range returned True for them), and trial division stops at
    sqrt(num) instead of num - 1.
    """
    if num < 2:
        return False
    # A composite number must have a factor no larger than its square root.
    for i in range(2, int(num ** 0.5) + 1):
        if num % i == 0:
            return False
    return True
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
d0e588e90b6e9d3db72d1392a01a7e6b09d3bdb7
|
1b5404b8099de74d4e39e0a41b1d04c61defa8d4
|
/five-words-five-letters/stuff/idea1.py
|
ed03774f208d9a8f86f2c874a5aa0592ccdb6ad7
|
[] |
no_license
|
ipeterov/random-stuff
|
5d07bdcfdcb145d06ed89095f2ad34b70ff0f0bd
|
dbb38d42331f636919fd149b23783e02ee2c9afb
|
refs/heads/master
| 2023-05-14T00:41:51.122251
| 2023-05-04T12:10:26
| 2023-05-04T12:10:26
| 206,028,412
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 505
|
py
|
import json

# How many mutually letter-disjoint words to find.
N_WORDS = 4

with open("data.json") as f:
    data = json.load(f)

words = data["words"]

found_words = []
letters = set()
# Greedy search: scan for words sharing no letter with those already picked.
# The for-else fires when a full scan finds no candidate; then restart the
# search with the first word of the list dropped.
while len(found_words) < N_WORDS:
    for word in words:
        if set(word).intersection(letters):
            continue

        if not found_words:
            print(f"Trying {word}")

        found_words.append(word)
        letters.update(word)
        break
    else:
        found_words = []
        letters = set()
        words = words[1:]

print(found_words)
|
[
"ipeterov1@gmail.com"
] |
ipeterov1@gmail.com
|
744685a3a5a9654399524c7be7eeb912353c7b3b
|
e3abf21d5e3aac6de49395db8dae56565198a701
|
/workon/contrib/flow/redis/consumer.py
|
758084a6d86ba4b7505738396020753ea64447ba
|
[
"BSD-3-Clause"
] |
permissive
|
workon-io/django-workon_old
|
0bcb63025eda5d6815b082d23e95ab22385d6233
|
be935a07a855b2150b4b81ee87d5041761ff168e
|
refs/heads/master
| 2021-08-19T07:24:19.801106
| 2017-11-25T06:13:00
| 2017-11-25T06:13:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 994
|
py
|
import asyncio
from aioredis import create_connection, Channel
import websockets
async def publish_to_redis(msg, path):
    """Publish *msg* on the Redis channel 'lightlevel<path>'.

    NOTE(review): a new Redis connection is opened per message and never
    explicitly closed -- consider reusing a single connection.
    """
    # Connect to Redis
    conn = await create_connection(('localhost', 6379))
    # Publish to channel "lightlevel{path}"
    await conn.execute('publish', 'lightlevel{}'.format(path), msg)
async def server(websocket, path):
    """Forward each received websocket message to Redis (one per second)
    until the client disconnects."""
    try:
        while True:
            # Receive data from "the outside world"
            message = await websocket.recv()
            # Feed this data to the PUBLISH co-routine
            await publish_to_redis(message, path)
            # Throttle: at most one forwarded message per second.
            await asyncio.sleep(1)
    except websockets.exceptions.ConnectionClosed:
        print('Connection Closed!')
if __name__ == '__main__':
    # Boiler-plate for the websocket server, running on localhost, port 8765
    loop = asyncio.get_event_loop()
    loop.set_debug(True)
    ws_server = websockets.serve(server, 'localhost', 8765)
    # Start the server, then serve connections until interrupted.
    loop.run_until_complete(ws_server)
    loop.run_forever()
|
[
"autrusseau.damien@gmail.com"
] |
autrusseau.damien@gmail.com
|
f075789d1da3f0e16a12c9a8acfcde09b6e99bf2
|
8b57c6609e4bf3e6f5e730b7a4a996ad6b7023f0
|
/models/view_escaping/search.py
|
e2ff59769f0e7b547699cce159f1c3593f689047
|
[] |
no_license
|
bullll/splunk
|
862d9595ad28adf0e12afa92a18e2c96308b19fe
|
7cf8a158bc8e1cecef374dad9165d44ccb00c6e0
|
refs/heads/master
| 2022-04-20T11:48:50.573979
| 2020-04-23T18:12:58
| 2020-04-23T18:12:58
| 258,293,313
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,501
|
py
|
from __future__ import absolute_import
from builtins import object
from splunk.models.view_escaping.base import STRING_SEARCH_MODE, DEFAULT_SEARCH_ID,POST_SEARCH_MODE
from splunk.models.view_escaping.base import SAVED_SEARCH_MODE, TEMPLATE_SEARCH_MODE
from splunk.models.view_escaping.drilldown import parseEventHandler
from splunk.models.view_escaping.tokendeps import parseTokenDeps
class Search(object):
    """Value object describing one search definition of a dashboard/form view."""

    def __init__(self, searchMode=STRING_SEARCH_MODE, searchCommand="", earliestTime=None, latestTime=None, id=None, base=None, app=None, cache=None, sampleRatio=None, tokenDeps=None, refresh=None, refreshType=None):
        self.searchMode = searchMode
        self.searchCommand = searchCommand
        self.earliestTime = earliestTime
        self.latestTime = latestTime
        self.id = id
        self.baseSearchId = base
        self.app = app
        self.statusBuckets = 0
        self.sampleRatio = sampleRatio
        self.refresh = refresh
        self.refreshType = refreshType
        # A post-process search with no explicit base chains off the
        # view's default/global search.
        if self.searchMode == POST_SEARCH_MODE and self.baseSearchId == None:
            self.baseSearchId = DEFAULT_SEARCH_ID
        # (eventName, handler) pairs attached by createEventHandlerFromXml.
        self.eventHandlers = []
        self.cache = cache
        self.tokenDeps = tokenDeps

    def normalizedSearchCommand(self):
        """Return the search string with surrounding whitespace stripped."""
        return self.searchCommand.strip()
def createSearchFromSearchXml(searchNode):
    """
    Parses a search definition from a search/dashboard/panel XML element.

    Determines the search mode (inline query, saved-search ref, or
    post-process via a 'base' attribute), collects time-range, refresh,
    sample-ratio and cache options, and attaches any event handlers.

    @param searchNode: lxml node representing the search element
    @return: a Search instance, or False when the node defines no search
    """
    opt = dict()
    base = searchNode.attrib.get('base')
    if searchNode.find('query') is not None:
        # Inline search string supplied directly in the XML.
        opt['searchMode'] = TEMPLATE_SEARCH_MODE
        opt['searchCommand'] = (
            searchNode.findtext('query')).replace("\n", " ").replace("\t", " ")
        sampleRatio = searchNode.findtext('sampleRatio')
        if sampleRatio is not None:
            opt['sampleRatio'] = int(sampleRatio)
    elif searchNode.get('ref') is not None:
        # Reference to a saved search, optionally scoped to an app.
        opt['searchMode'] = SAVED_SEARCH_MODE
        opt['searchCommand'] = (
            searchNode.get('ref')).replace("\n", " ").replace("\t", " ")
        if searchNode.get('app') is not None:
            opt['app'] = searchNode.get('app')
        cacheVal = searchNode.findtext('cache')
        if cacheVal:
            opt['cache'] = cacheVal
    elif not base:
        # Neither an inline query, a saved-search ref, nor a base: no search.
        return False
    for nodePair in [('earliest', 'earliestTime'), ('latest', 'latestTime')]:
        nodeVal = searchNode.findtext(nodePair[0])
        if nodeVal:
            opt[nodePair[1]] = nodeVal
    refresh = searchNode.findtext('refresh')
    if refresh is not None:
        opt['refresh'] = refresh
    refreshType = searchNode.findtext('refreshType')
    if refreshType is not None:
        opt['refreshType'] = refreshType
    id = searchNode.attrib.get('id')
    tokenDeps = parseTokenDeps(searchNode)
    if id:
        opt['id'] = id
    if base:
        # A 'base' attribute turns this into a post-process search.
        opt['base'] = base
        opt['searchMode'] = POST_SEARCH_MODE
    if tokenDeps:
        opt['tokenDeps'] = tokenDeps
    search = Search(**opt)
    for evtName in ('progress', 'preview', 'done', 'finalized', 'error', 'fail', 'cancelled'):
        createEventHandlerFromXml(search, searchNode, evtName)
    return search
def createEventHandlerFromXml(search, searchNode, eventName):
    """Attach a parsed <eventName> handler to *search* if the node defines one."""
    node = searchNode.find(eventName)
    if node is not None:
        search.eventHandlers.append((eventName, parseEventHandler(node, ('any', 'match'))))
|
[
"splunk@x.y"
] |
splunk@x.y
|
9ce428f475168d006ce5e4484cc7ed838008ff93
|
3545ee160458acac7452666aa07826b58e144351
|
/demo/text_recognition/__base__/test_scripts/test_crnn.py
|
e8f2480000631e7e11685879696526362fa2f576
|
[
"Apache-2.0"
] |
permissive
|
OCRWorld/DAVAR-Lab-OCR
|
7cc81af43a0e8f60066e7761d950f509c40cfd46
|
fb47a96d1a38f5ce634c6f12d710ed5300cc89fc
|
refs/heads/main
| 2023-08-29T09:41:19.377628
| 2021-11-08T11:16:37
| 2021-11-08T11:16:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,450
|
py
|
"""
##################################################################################################
# Copyright Info : Copyright (c) Davar Lab @ Hikvision Research Institute. All rights reserved.
# Filename : test_crnn.py
# Abstract : CRNN Model evaluation config
# Current Version: 1.0.0
# Date : 2021-06-11
##################################################################################################
"""
import os
_base_ = [
'../test_base_setting.py'
]
ckpts = list()
# model name setting
out_name = 'davar_test_crnn'
# model parameter dictionary
tmp_dict = dict()
# experiment Name
tmp_dict['Name'] = 'davar_test_crnn'
# ===================== model .pth file path ========================
tmp_dict['ModelPath'] = '/data1/workdir/davar_opensource/att_test/CRNN_pretrained-84c6eb23.pth'
out_name += '/' + tmp_dict['ModelPath'].split('/')[-2].split('.')[0]
# ===================== model config file path ========================
tmp_dict['ConfigPath'] = '/data1/open-source/demo/text_recognition/__base__/res32_bilstm_ctc.py'
# ===================== model test mode ========================
tmp_dict['Epochs'] = None
ckpts.append(tmp_dict)
# save result of the test experiment
out_path = os.path.join('/data1/output_dir/sota_exp', out_name + '/')
force_test = False
force_eval = False
do_test = 1 # 1 for test
do_eval = 1
test_path = out_path + 'res/'
eval_path = out_path + 'eval/'
|
[
"qiaoliang6@hikvision.com"
] |
qiaoliang6@hikvision.com
|
dc3d16d9e2f41a0ba7b4b953991209736a19d45f
|
0bc57447bedd04510a94a35cdee5b8ffdf3e5245
|
/musics/urls.py
|
4f2331c8f8e2ee46fc5241e057da0295090d012d
|
[] |
no_license
|
GH-Lim/DRF
|
243eead15f28bfbae056d01e0e0821058c39ca38
|
b839df7b8eb1adbc72f31b530700389536551308
|
refs/heads/master
| 2020-08-26T12:14:15.010107
| 2019-10-24T04:37:43
| 2019-10-24T04:37:43
| 217,007,653
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,211
|
py
|
from django.urls import path
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
from . import views
# drf-yasg schema view backing the 'docs/' (ReDoc) and 'swagger/' routes below.
schema_view = get_schema_view(
    openapi.Info(
        title='Music API',
        default_version='v1',
        description='음악 관련 API 서비스입니다.',
    )
)
# URL namespace, e.g. reverse('musics:music_list').
app_name = 'musics'
urlpatterns = [
    path('musics/', views.music_list, name='music_list'),
    path('musics/<int:music_pk>/', views.music_detail, name='music_detail'),
    path('musics/<int:music_pk>/comments/', views.comments_create, name='comments_create'),
    path('artists/', views.artist_list_apply, name='artist_list_apply'),
    path('artists/<int:artist_pk>/', views.artist_detail, name='artist_detail'),
    path('artists/<int:artist_pk>/musics/', views.musics_create, name='musics_create'),
    path('comments/', views.comment_list, name='comment_list'),
    path('comments/<int:comment_pk>/', views.comments_update_and_delete, name='comments_update_and_delete'),
    # NOTE(review): this pattern is identical in shape to 'artists/<int:artist_pk>/'
    # above, and Django dispatches to the first match, so comment_detail is
    # unreachable via URL -- confirm the intended path.
    path('artists/<int:comment_pk>/', views.comment_detail, name='comment_detail'),
    path('docs/', schema_view.with_ui('redoc'), name='api_docs'),
    path('swagger/', schema_view.with_ui('swagger'), name='api_swagger'),
]
|
[
"gunhyuck11@gmail.com"
] |
gunhyuck11@gmail.com
|
6b5349d39fd002718d738dbc82a3a4e7a56d2951
|
f614e8567f9458e298c651d0be166da9fc72b4bf
|
/Django/Solutions/Library + guide/book_app/admin.py
|
f9c0e4512a43793359d50e572636f53a7f0fbdcf
|
[] |
no_license
|
PdxCodeGuild/class_Binary_Beasts
|
458c5be00b7bce3bb4ac9b7ab485c47f72be4294
|
b1298cb5d74513873f82be4ed37676f8b0de93dd
|
refs/heads/master
| 2023-06-28T07:05:21.703491
| 2021-07-29T03:44:09
| 2021-07-29T03:44:09
| 344,980,863
| 4
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 189
|
py
|
from django.contrib import admin
from .models import Author, Book, LandBook

# Register the library models so they are editable in the Django admin site.
for model in (Author, Book, LandBook):
    admin.site.register(model)
|
[
"ademichieli@squarespace.com"
] |
ademichieli@squarespace.com
|
4d09d8081569fd51a578fd6ce0ad57f163f3fc80
|
044bb7ac47cfc1a6dc685e81637d6049e5cee452
|
/backend_deploy_0330_21140/urls.py
|
4e516b775d26f86e79fef19a8ad7ccb1d5a5c7d4
|
[] |
no_license
|
crowdbotics-apps/backend-deploy-0330-21140
|
c1034bad2df5d212efd4b0d372a96af08e007444
|
57a5e272767f317d5072a6287bf86e80a321d2f5
|
refs/heads/master
| 2023-03-28T12:21:00.269768
| 2021-03-30T16:53:08
| 2021-03-30T16:53:08
| 353,063,297
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,279
|
py
|
"""backend_deploy_0330_21140 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
    path("", include("home.urls")),
    path("accounts/", include("allauth.urls")),
    path("modules/", include("modules.urls")),
    path("api/v1/", include("home.api.v1.urls")),
    path("admin/", admin.site.urls),
    path("users/", include("users.urls", namespace="users")),
    path("rest-auth/", include("rest_auth.urls")),
    # Override email confirm to use allauth's HTML view instead of rest_auth's API view
    path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
    path("rest-auth/registration/", include("rest_auth.registration.urls")),
    # NOTE(review): "home.urls" is already included at the root path above;
    # this second include under "home/" aliases the same views -- confirm intent.
    path("home/", include("home.urls")),
    # Repeated "api/v1/" prefixes are valid: Django tries each include in order
    # until one of its inner patterns matches.
    path("api/v1/", include("store.api.v1.urls")),
    path("store/", include("store.urls")),
    path("api/v1/", include("users.api.v1.urls")),
]
# Branding for the Django admin site.
admin.site.site_header = "Backend Deploy 0330"
admin.site.site_title = "Backend Deploy 0330 Admin Portal"
admin.site.index_title = "Backend Deploy 0330 Admin"
# swagger
api_info = openapi.Info(
    title="Backend Deploy 0330 API",
    default_version="v1",
    description="API documentation for Backend Deploy 0330 App",
)
schema_view = get_schema_view(
    api_info,
    public=True,
    # The Swagger UI itself requires an authenticated user.
    permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
    path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
db8d4d672410d929a4d61b58127f7fd8a6612656
|
aa1352a2f32c0c36194d3a6f8e683adba487a3eb
|
/FiRoom_backend/tryon/migrations/0002_userbodyshot.py
|
bef0d540635aef2bce342031f71eb08313e492d7
|
[] |
no_license
|
Ace-bb/FiRoom_backend
|
6c98d01c40e8de31ccbe86beaeada6c62516705e
|
efd4d9c1d7265e42f56638d5374a569a146acc03
|
refs/heads/main
| 2023-03-30T15:48:21.376390
| 2021-03-23T15:53:48
| 2021-03-23T15:53:48
| 338,780,869
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 622
|
py
|
# Generated by Django 3.1.4 on 2021-03-14 10:49
from django.db import migrations, models
class Migration(migrations.Migration):
    # Migration 0002 of the tryon app: creates the userBodyShot table.
    # 'shot' is a CharField(1024) -- presumably a path or URL to the user's
    # body photo; confirm against the model definition.
    dependencies = [
        ('tryon', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='userBodyShot',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userId', models.IntegerField()),
                ('userName', models.CharField(max_length=64)),
                ('shot', models.CharField(max_length=1024)),
            ],
        ),
    ]
|
[
"13489323285@163.com"
] |
13489323285@163.com
|
4e6beec5dd0e85ffc2488e15652415b40cee11c3
|
23130cd12e38dbce8db8102810edaad70b240ae2
|
/lintcode/594.py
|
a81e22c00509f4292f1898da5963ca42968389f4
|
[
"MIT"
] |
permissive
|
kangli-bionic/algorithm
|
ee6687c82101088db20f10fb958b4e45e97d3d31
|
c3c38723b9c5f1cc745550d89e228f92fd4abfb2
|
refs/heads/master
| 2023-01-05T09:29:33.204253
| 2020-10-25T17:29:38
| 2020-10-25T17:29:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,032
|
py
|
"""
robin karp:
abcdefgh, find string def
we only need to calculate a hash for def, that kaes O(K)
abcdefgh
---
abcdefgh
---
we only have to check the hash kth len's at a time, the hash can be culated in O(1) time
hash(bcd) = hash(abc) * 31 + e - 31^4 * a
it will take O(n) to check all the hash agaist the target hash.
for those rare cases where hashes clides, check letter by letter O(k)
total time O(n + k)
"""
class Solution:
    """
    Rabin-Karp substring search.

    @param: source: A source string
    @param: target: A target string
    @return: An integer as index (first occurrence of target in source, or -1)
    """
    def strStr2(self, source, target):
        """Return the first index of target in source, -1 if absent.

        An empty target matches at index 0; None inputs yield -1.
        Runs in O(n + k) expected time via a 31-base rolling hash.
        """
        # None inputs are treated as "not found" rather than raising.
        if source is None or target is None:
            return -1
        # Empty target matches at position 0 by convention.
        if not target:
            return 0
        k = len(target)
        # Fix: a target longer than the source can never match.  The original
        # code skipped this guard, so an accidental hash collision could run
        # the verify loop with out-of-range / negative indices.
        if k > len(source):
            return -1
        BASE = 2000000  # modulus keeping the rolling hashes bounded
        # 31^k mod BASE: weight of the character sliding out of the window.
        highest_power = 1
        for _ in range(k):
            highest_power = (highest_power * 31) % BASE
        # Hash of the target, computed once in O(k).
        hash_target = 0
        for ch in target:
            hash_target = (hash_target * 31 + ord(ch)) % BASE
        # Roll a k-character window across the source in O(n).
        hash_code = 0
        for i in range(len(source)):
            hash_code = (hash_code * 31 + ord(source[i])) % BASE
            if i >= k:
                # Drop the character that just left the window.
                hash_code = (hash_code - highest_power * ord(source[i - k])) % BASE
                if hash_code < 0:
                    hash_code += BASE
            # The window is full once i >= k - 1; on a hash hit, verify
            # character-by-character to rule out collisions (O(k), rare).
            if i >= k - 1 and hash_code == hash_target:
                start = i - k + 1
                if source[start:start + k] == target:
                    return start
        return -1
# Ad-hoc smoke test: empty source and empty target -> strStr2 returns 0.
s = Solution()
source = ""
target = ""
print(s.strStr2(source, target))
|
[
"hipaulshi@gmail.com"
] |
hipaulshi@gmail.com
|
41b8ba509c04dd91d9f7df54a9af6e6ac7a56c52
|
f0d713996eb095bcdc701f3fab0a8110b8541cbb
|
/izfXy5SGfeekmKExH_15.py
|
dd6d5ebcb9a300d2e25edcd9ddd6522907132499
|
[] |
no_license
|
daniel-reich/turbo-robot
|
feda6c0523bb83ab8954b6d06302bfec5b16ebdf
|
a7a25c63097674c0a81675eed7e6b763785f1c41
|
refs/heads/main
| 2023-03-26T01:55:14.210264
| 2021-03-23T16:08:01
| 2021-03-23T16:08:01
| 350,773,815
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 899
|
py
|
"""
Write a function that takes two lists and adds the first element in the first
list with the first element in the second list, the second element in the
first list with the second element in the second list, etc, etc. Return `True`
if all element combinations add up to the same number. Otherwise, return
`False`.
### Examples
puzzle_pieces([1, 2, 3, 4], [4, 3, 2, 1]) ➞ True
# 1 + 4 = 5; 2 + 3 = 5; 3 + 2 = 5; 4 + 1 = 5
# Both lists sum to [5, 5, 5, 5]
puzzle_pieces([1, 8, 5, 0, -1, 7], [0, -7, -4, 1, 2, -6]) ➞ True
puzzle_pieces([1, 2], [-1, -1]) ➞ False
puzzle_pieces([9, 8, 7], [7, 8, 9, 10]) ➞ False
### Notes
* Each list will have at least one element.
* Return `False` if both lists are of different length.
"""
def puzzle_pieces(a1, a2):
    """Return True iff the lists have equal length and every pairwise
    sum a1[i] + a2[i] is the same value."""
    if len(a1) != len(a2):
        return False
    pair_sums = {x + y for x, y in zip(a1, a2)}
    return len(pair_sums) == 1
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
4c5f13f15969a69343f4b1d22fc01fd48532443f
|
25d641a55eb868cc74fd4e3e4daea43f6deb5853
|
/3 - Web UI & API/virtual/Lib/site-packages/pip/_internal/req/_vti_cnf/__init__.py
|
47fccc4e6889f48b611d2cacf286d35b83631172
|
[] |
no_license
|
greens1/Final-Year-Project
|
370b7ef9979049cfc75e6776da24c7a286848b71
|
2e72f43893595deef6aa5323773a6161f2cd873a
|
refs/heads/master
| 2022-11-13T19:55:17.884414
| 2018-05-17T22:15:14
| 2018-05-17T22:15:14
| 133,852,365
| 1
| 0
| null | 2022-11-01T20:11:11
| 2018-05-17T18:21:16
|
Python
|
UTF-8
|
Python
| false
| false
| 193
|
py
|
vti_encoding:SR|utf8-nl
vti_timelastmodified:TR|15 Apr 2018 11:07:36 -0000
vti_extenderversion:SR|12.0.0.0
vti_cacheddtm:TX|15 Apr 2018 11:07:36 -0000
vti_filesize:IR|2152
vti_backlinkinfo:VX|
|
[
"greens1@tcd.ie"
] |
greens1@tcd.ie
|
0f261603cefc73452c72b9da92ee340e11c55b14
|
9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97
|
/sdBs/AllRun/rat_j0455+1305/sdB_RAT_J0455+1305_lc.py
|
ffb25211b57623347ad503f12006e96baf0c0b4a
|
[] |
no_license
|
tboudreaux/SummerSTScICode
|
73b2e5839b10c0bf733808f4316d34be91c5a3bd
|
4dd1ffbb09e0a599257d21872f9d62b5420028b0
|
refs/heads/master
| 2021-01-20T18:07:44.723496
| 2016-08-08T16:49:53
| 2016-08-08T16:49:53
| 65,221,159
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 351
|
py
|
from gPhoton.gAperture import gAperture
def main():
    """Run a gPhoton NUV aperture-photometry job for sdB_RAT_J0455+1305.

    Writes a 30 s-binned light curve CSV, overwriting any existing output.
    """
    # NOTE(review): the csvfile path contains a space before the final '/'
    # ('...J0455+1305 /sdB...'); likely a typo -- confirm the intended directory.
    # 'GPHOTON_OUTPU' (missing final T) also looks suspicious.
    gAperture(band="NUV", skypos=[73.813417,13.091611], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_RAT_J0455+1305 /sdB_RAT_J0455+1305_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
    main()
|
[
"thomas@boudreauxmail.com"
] |
thomas@boudreauxmail.com
|
6539b59094b60080421eff95fd512b2b1b2ed89b
|
bb33e6be8316f35decbb2b81badf2b6dcf7df515
|
/source/res/scripts/client/gui/Scaleform/daapi/view/lobby/telecom_rentals/__init__.py
|
f6ea7400ff249f307dd982a1c74bf9bf2f1aaa75
|
[] |
no_license
|
StranikS-Scan/WorldOfTanks-Decompiled
|
999c9567de38c32c760ab72c21c00ea7bc20990c
|
d2fe9c195825ececc728e87a02983908b7ea9199
|
refs/heads/1.18
| 2023-08-25T17:39:27.718097
| 2022-09-22T06:49:44
| 2022-09-22T06:49:44
| 148,696,315
| 103
| 39
| null | 2022-09-14T17:50:03
| 2018-09-13T20:49:11
|
Python
|
UTF-8
|
Python
| false
| false
| 152
|
py
|
# Python bytecode 2.7 (decompiled from Python 2.7)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/telecom_rentals/__init__.py
pass
|
[
"StranikS_Scan@mail.ru"
] |
StranikS_Scan@mail.ru
|
e653e6a03c8f0d3af8e678a8ee480073889dfb04
|
362e68fa033cc42cf9981e5f0c441ef2fb4816e6
|
/scripts/pick_reference_otus_through_otu_table.py
|
5b07919bad20bc776fd595b77388ecffb039c3d5
|
[] |
no_license
|
kartoffelpuffer/qiime
|
6d409c058f777be3e17a7130d0902f4d0256795a
|
eeac244b5553579a8d0b540c31d6202acbc983d3
|
refs/heads/master
| 2020-12-25T02:39:52.585360
| 2013-04-25T16:59:25
| 2013-04-25T16:59:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 443
|
py
|
#!/usr/bin/env python
# NOTE(review): Python 2 script -- the bare `print` statement at the bottom
# is a syntax error under Python 3.
from __future__ import division
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011, The QIIME Project"
__credits__ = ["Greg Caporaso"]
__license__ = "GPL"
__version__ = "1.6.0-dev"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
__status__ = "Development"
# Deprecation shim: the real logic lives in pick_closed_reference_otus.py.
print "\nThis script has been renamed pick_closed_reference_otus.py for clarity. For help, call pick_closed_reference_otus.py -h\n"
|
[
"gregcaporaso@gmail.com"
] |
gregcaporaso@gmail.com
|
0697cfae733c1c85df9bb03a68d69a1b583cc00a
|
9ac793d32e70775bb119aaddeb832624e3cf9281
|
/strkeyword3.py
|
2eb32f55412a855bb78a367f3294cc7ae3f400c3
|
[] |
no_license
|
prabhatpal77/Adv-python-polymorphism
|
9368311732e1bca9b54e099489c255e3498fbb9b
|
d68375e4816a746a1ffbffa6d179c50227267feb
|
refs/heads/master
| 2020-07-29T00:41:08.162385
| 2019-09-19T16:35:32
| 2019-09-19T16:35:32
| 209,601,547
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 324
|
py
|
# Demonstrates the __str__ magic method together with __init__.
class X:
    """Wraps a message; str(instance) yields that message."""

    def __init__(self, msg):
        self.msg = msg

    def display(self):
        # Fixed greeting, independent of the stored message.
        print("welcome")

    def __str__(self):
        return self.msg


# For each message: print the instance (its message), then the greeting.
for message in ("prabhat pal", "python", "django"):
    obj = X(message)
    print(obj)
    obj.display()
|
[
"noreply@github.com"
] |
prabhatpal77.noreply@github.com
|
ce67e22340faa26b9021729066f24d2f809865a6
|
3a84f9b61a21904251236c22aa893d6ca77a6650
|
/pyrosim/demos/ludobots/Demo_19_Torque.py
|
8baf3731228a475e057c4f798cd2676fd3b7de5b
|
[] |
no_license
|
davidmatthews1uvm/2020-ALIFE
|
8fd58d59c98364ccc8f40f14c6e0c6281d4d44de
|
bf8321f0112974b26239710ac7f3f42afb34aec8
|
refs/heads/master
| 2022-11-29T17:23:58.384592
| 2020-07-18T14:21:29
| 2020-07-18T14:21:29
| 272,540,984
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,396
|
py
|
import sys
sys.path.insert(0, '../..')
import pyrosim
import math
# Pendulum arm dimensions (radius is a tenth of the length).
ARM_LENGTH = 0.75
ARM_RADIUS = ARM_LENGTH / 10.0
# Grid of hinge-motor settings to sweep: one simulation per (torque, speed).
TORQUES = [0.1, 100.0]
SPEEDS = [0.1, 1.0]
# torque is the max torque possible, not necessarily the torque used
for torque in TORQUES:
    for speed in SPEEDS:
        sim = pyrosim.Simulator(eval_time=100)
        # Vertical cylinder arm with a box attached below it.
        cyl = sim.send_cylinder(x=0, y=0, z=2.0*ARM_LENGTH,
                                r1=0, r2=0, r3=1,
                                length=ARM_LENGTH, radius=ARM_RADIUS)
        box = sim.send_box(x=0, y=0, z=1.25*ARM_LENGTH, length=ARM_RADIUS *
                           7., width=ARM_RADIUS*7.0, height=ARM_RADIUS*7.0)
        # Hinge anchoring the cylinder to the world (body id -1), limited to
        # +/- 90 degrees, driven with the swept torque/speed settings.
        world_cyl_joint = sim.send_hinge_joint(
            first_body_id=-1, second_body_id=cyl,
            x=0, y=0, z=2.5*ARM_LENGTH,
            n1=1, n2=0, n3=0, lo=-3.14159/2.0, hi=+3.14159/2.0,
            torque=torque, speed=speed, position_control=True
        )
        # Passive hinge connecting the box to the cylinder.
        cyl_box_joint = sim.send_hinge_joint(
            first_body_id=cyl, second_body_id=box,
            x=0, y=0, z=1.5*ARM_LENGTH)
        # Constant input neuron driving the motor neuron on the world joint.
        fneuron = sim.send_user_input_neuron(in_values=1)
        mneuron = sim.send_motor_neuron(joint_id=world_cyl_joint)
        sim.send_synapse(source_neuron_id=fneuron,
                         target_neuron_id=mneuron, weight=1.0)
        sim.film_body(box, 'track')
        sim.start()
        sim.wait_to_finish()
|
[
"dmatthe1@uvm.edu"
] |
dmatthe1@uvm.edu
|
74f5cb59d17df02e4542da48b8d5020b5be8d921
|
6c3bb7feea3b3b029fe65de11954aee778ac3578
|
/sorting algorithms/radix_sort_imp.py
|
431a39dfefbe0d32954ecf059fd96db94d1d466e
|
[
"Unlicense"
] |
permissive
|
mkoryor/Python
|
72ebb2201c7f4887e023f541509da7e2c6fab5d5
|
837ec4c03130dc4cb919fb5f1eeb4d31206790e4
|
refs/heads/master
| 2023-05-04T13:00:09.106811
| 2021-05-11T03:06:04
| 2021-05-11T03:06:04
| 114,468,023
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,220
|
py
|
class RadixSort(object):
def sort(self, array, base=10):
if array is None:
raise TypeError('array cannot be None')
if not array:
return []
max_element = max(array)
max_digits = len(str(abs(max_element)))
curr_array = array
for digit in range(max_digits):
buckets = [[] for _ in range(base)]
for item in curr_array:
buckets[(item//(base**digit))%base].append(item)
curr_array = []
for bucket in buckets:
curr_array.extend(bucket)
return curr_array
import unittest
class TestRadixSort(unittest.TestCase):
    """Unit checks for RadixSort.sort: None input, empty list, typical list."""
    def test_sort(self):
        radix_sort = RadixSort()
        # None is rejected with TypeError rather than being treated as empty.
        self.assertRaises(TypeError, radix_sort.sort, None)
        self.assertEqual(radix_sort.sort([]), [])
        array = [128, 256, 164, 8, 2, 148, 212, 242, 244]
        expected = [2, 8, 128, 148, 164, 212, 242, 244, 256]
        self.assertEqual(radix_sort.sort(array), expected)
        print('Success: test_sort')
def main():
    """Run the radix-sort checks directly, without a unittest runner."""
    TestRadixSort().test_sort()


if __name__ == '__main__':
    main()
|
[
"noreply@github.com"
] |
mkoryor.noreply@github.com
|
aa87dbc1381cc510f4fd0e691ac80df795db118b
|
a7b592be95dc2af9fdb56725f44e98cc59166e6f
|
/apps/common/biz_utils/utils_dictwrapper.py
|
a1f43066b827d3f81de5ed1bcdb7c7bd4e4e747f
|
[] |
no_license
|
cash2one/CRM-3
|
bc864c462d155b5dc6a51a5edbd564574b3e2f94
|
cedcaeb397ccadb36952534242bd296c5b4513bb
|
refs/heads/master
| 2021-06-14T07:40:53.572013
| 2017-05-23T15:52:06
| 2017-05-23T15:52:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,712
|
py
|
# coding=UTF-8
import re
# Matches names of the form <lowercase letters><1..15>, e.g. "pv3", "click15".
# NOTE(review): not referenced in this file chunk -- presumably used by importers.
RPT_PATTERN = re.compile(r'^([a-z]+)([1-9]{1}|1[0-5]{1})$')
class DictWrapper(dict):
    """Dict subclass exposing keys as attributes: d.key <-> d['key']."""

    def __getattr__(self, name):
        # Attribute reads fall through to item lookup.  Missing keys must
        # surface as AttributeError so hasattr()/getattr() behave normally.
        try:
            return super(DictWrapper, self).__getitem__(name)
        except KeyError:
            raise AttributeError("key %s not found" % name)

    def __setattr__(self, name, value):
        # Attribute writes are stored as dict items (no instance __dict__ use).
        super(DictWrapper, self).__setitem__(name, value)

    def __delattr__(self, name):
        super(DictWrapper, self).__delitem__(name)

    def hasattr(self, name):
        # Convenience mirror of the builtin hasattr, via key membership.
        return name in self

    @classmethod
    def load_dict(cls, org_data):
        """Recursively convert nested dicts (including dicts inside lists or
        tuples) into DictWrapper instances, e.g.:

            test_dict = {'a': {'b': 1, 'c': [2, {'e': 3}], 'f': {'g': 4}}}
            ss = DictWrapper.load_dict(test_dict)
            ss.a.c[1].e  # -> 3
            ss.a.b       # -> 1
        """
        if isinstance(org_data, dict):
            return cls({key: cls.load_dict(value) for key, value in org_data.items()})
        if isinstance(org_data, (list, tuple)):
            return [cls.load_dict(element) for element in org_data]
        return org_data
class KeywordGlobal(DictWrapper):
    """Network-wide (global) statistics for a keyword.

    Inherits DictWrapper, so every attribute assignment below is stored as a
    dict item and readable both as obj.g_pv and obj['g_pv'].
    """
    def __init__(self, g_pv = 0, g_click = 0, g_competition = 0, g_cpc = 0, g_coverage = 0, g_roi = 0, g_paycount = 0):
        # page views, clicks, competition, cost-per-click, coverage, ROI,
        # paid-order count -- all defaulting to 0.
        self.g_pv = g_pv
        self.g_click = g_click
        self.g_competition = g_competition
        self.g_cpc = g_cpc
        self.g_coverage = g_coverage
        self.g_roi = g_roi
        self.g_paycount = g_paycount
    @property
    def g_ctr(self):
        '''Return the network-wide click-through rate as a percentage
        (clicks * 100 / page views), or 0.00 when either count is zero.'''
        if self.g_click and self.g_pv:
            return self.g_click * 100.0 / self.g_pv
        return 0.00
|
[
"956879357@qq.com"
] |
956879357@qq.com
|
81041ed0d6f75bedeba160908e717643a6fc408b
|
66fc0b6f603285f32544b90d6562a7f66e341abf
|
/parser.py
|
4638524e101de45969e96a083740910b38aabc11
|
[] |
no_license
|
openelections/openelections-data-hi
|
f4257cab929954218f111f9eab0d38568c41cd30
|
7b56c5ddd5448c4f62fc2ae30d0eadcad609bd19
|
refs/heads/master
| 2023-05-02T22:43:42.495617
| 2023-04-19T00:00:56
| 2023-04-19T00:00:56
| 96,584,468
| 0
| 5
| null | 2023-04-19T00:00:57
| 2017-07-07T23:54:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,383
|
py
|
# -*- coding: utf-8 -*-
import csv
import requests
OFFICES = ['President and Vice President', 'Governor', 'U.S. Representative', 'State Senator', 'State Representative', 'Lieutenant Governor', 'U.S. Senator']
# Precinct -> county lookup table, loaded once at import time.
# Fix: use a context manager so the file handle is closed (the original
# left precinct_file open for the life of the process).
with open("precincts.txt", "rt") as precinct_file:
    csvfile = csv.DictReader(precinct_file, delimiter=',')
    precincts = list(csvfile)
def general():
    """Scrape Hawaii's 2020 general-election media feed and write a
    per-precinct results CSV under 2020/."""
    results = []
    url = "https://elections.hawaii.gov/wp-content/results/media.txt"
    r = requests.get(url)
    decoded_content = r.text
    reader = csv.DictReader(decoded_content.splitlines())
    for row in reader:
        # Map precinct name -> county via the module-level precincts table.
        county = next((p['COUNTY'] for p in precincts if row['Precinct_Name'] == p['PRECINCT']), None)
        office = row['Contest_title']
        if 'Dist' in office:
            # Split "Office, Dist X" and normalize roman-numeral districts.
            office, district = office.split(', Dist ')
            if district == 'I':
                district = "1"
            elif district == 'I Vacancy':
                district = "1 Unexpired"
            elif district == 'II':
                district = "2"
            # other district strings (e.g. plain numbers) pass through as-is
        else:
            district = None
        party = row['Choice_party']
        # Total votes = absentee + early walk-in + election-day.
        votes = int(row['Absentee_votes']) + int(row['Early_votes']) + int(row['Election_Votes'])
        results.append([county, row['Precinct_Name'], office, district, party, row['Candidate_name'], row['Absentee_votes'], row['Early_votes'], row['Election_Votes'], votes])
    with open('2020/20201103__hi__general__precinct.csv','wt') as csvfile:
        csvwriter = csv.writer(csvfile)
        csvwriter.writerow(['county','precinct', 'office', 'district', 'party', 'candidate', 'absentee', 'early_votes', 'election_day', 'votes'])
        csvwriter.writerows(results)
def primary():
    """Scrape Hawaii's 2018 primary media feed (filtered to OFFICES) and
    write a per-precinct results CSV under 2018/."""
    results = []
    url = "https://elections.hawaii.gov/wp-content/results/media.txt"
    r = requests.get(url)
    decoded_content = r.text
    reader = csv.DictReader(decoded_content.splitlines(), delimiter=',', quotechar='"')
    for row in reader:
        # Keep only rows for contests we track.
        if any(x in row['Contest_title'] for x in OFFICES):
            county = next((p['COUNTY'] for p in precincts if row['Precinct_Name'] == p['PRECINCT']), None)
            # NOTE(review): 'SELECT A PARTY' contains none of the OFFICES
            # substrings, so this branch appears unreachable given the filter
            # above -- confirm whether the filter or the branch is intended.
            if row['Contest_title'] == 'SELECT A PARTY':
                office = 'Straight Party'
                party = None
            else:
                # Primary contests are titled "Office - PARTY".
                office, party = row['Contest_title'].split(' - ')
            if 'Dist' in office:
                # Split "Office, Dist X" and normalize roman-numeral districts.
                office, district = office.split(', Dist ')
                if district == 'I':
                    district = "1"
                elif district == 'I Vacancy':
                    district = "1 Unexpired"
                elif district == 'II':
                    district = "2"
            else:
                district = None
            # Total votes = absentee + early walk-in + election-day.
            votes = int(row['Absentee_votes']) + int(row['Early_votes']) + int(row['Election_Votes'])
            results.append([county, row['Precinct_Name'], office, district, party, row['Candidate_name'], row['Absentee_votes'], row['Early_votes'], row['Election_Votes'], votes])
    with open('2018/20180811__hi__primary__precinct.csv','w') as csvfile:
        csvwriter = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
        csvwriter.writerow(['county','precinct', 'office', 'district', 'party', 'candidate', 'absentee', 'early_votes', 'election_day', 'votes'])
        csvwriter.writerows(results)
if __name__ == "__main__":
    # general()
    primary()
|
[
"dwillis@gmail.com"
] |
dwillis@gmail.com
|
dba8bef202bdd565edd4a902adbacc05ed643e9a
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02609/s490665806.py
|
a8dbceee4a95774d3a0ccfe52e582fddc2f1fdeb
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 518
|
py
|
def f(n):
    """Return 1 plus the number of steps reducing n via n %= popcount(n)
    until it reaches 0 (so f(0) == 1)."""
    steps = 1
    while n:
        n %= bin(n).count("1")
        steps += 1
    return steps
# For each bit i of the n-bit string x, flip that bit and print f() of the
# resulting value, where f counts the n %= popcount(n) reduction steps.
n=int(input())
x=input()
o=x.count("1")
# popcount(x) == 0: flipping any bit gives popcount 1, and anything mod 1
# is 0, so every answer is f(0) == 1.
if o==0:exit(print(*[1]*n))
# popcount(x) == 1: handled separately because flipping the single 1 off
# would require reducing mod popcount 0 (division by zero) in the general path.
if o==1:
    if x[-1]=="1":
        ans=[2]*n
        ans[-1]=0
    else:
        ans=[1]*n
        ans[-1]=2
        ans[x.index("1")]=0
    exit(print(*ans))
# General case: precompute x mod (o+1) and x mod (o-1); flipping bit i
# changes the popcount to o+1 (0->1) or o-1 (1->0).
mo=0
mz=0
for i in range(n):
    if x[n-i-1]=="1":
        mo=(pow(2,i,o+1)+mo)%(o+1)
        mz=(pow(2,i,o-1)+mz)%(o-1)
# Adjust the precomputed remainder by +/- 2^(n-i-1) for the flipped bit,
# then run the step-count function on the reduced value.
for i in range(n):
    if x[i]=="1":
        m=(mz-pow(2,n-i-1,o-1))%(o-1)
    else:
        m=(mo+pow(2,n-i-1,o+1))%(o+1)
    print(f(m))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
455a1ea7e2943449754c8994accb0933e72d7732
|
bd211803ddb664c2ba937abdb14dd8a34429e999
|
/kokkuvote/migrations/0001_initial.py
|
e128e6762fca91d41c8bbc76ae2e64598c981041
|
[] |
no_license
|
alvarantson/emartauto
|
f8055257966964c75363bfed881f861c411dbf9d
|
c81fd15e509ac85f22c7a6249cecda040bbf78ff
|
refs/heads/master
| 2022-02-21T19:13:15.757053
| 2022-02-06T14:50:29
| 2022-02-06T14:50:29
| 218,960,558
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 491
|
py
|
# Generated by Django 2.2.9 on 2020-04-17 11:55
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial schema for the kokkuvote app: a single google_link table
    # holding one CharField 'link' (max 999 chars).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='google_link',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('link', models.CharField(max_length=999)),
            ],
        ),
    ]
|
[
"alvarantson@gmail.com"
] |
alvarantson@gmail.com
|
054492b4c0a901a426a9aa22ee5e584cbee95884
|
2c4763aa544344a3a615f9a65d1ded7d0f59ae50
|
/playground/cfg_cache/wscript
|
9303a0b4fe3444276207a25f55d371502561e5f1
|
[] |
no_license
|
afeldman/waf
|
572bf95d6b11571bbb2941ba0fe463402b1e39f3
|
4c489b38fe1520ec1bc0fa7e1521f7129c20f8b6
|
refs/heads/master
| 2021-05-09T18:18:16.598191
| 2019-03-05T06:33:42
| 2019-03-05T06:33:42
| 58,713,085
| 0
| 0
| null | 2016-05-13T07:34:33
| 2016-05-13T07:34:33
| null |
UTF-8
|
Python
| false
| false
| 401
|
#! /usr/bin/env python
"""
compare the execution time of
waf configure
and
waf configure --confcache
"""
top = '.'
out = 'build'
def options(opt):
    """waf options step: load C-compiler options and add the --confcache
    flag used to enable the configuration cache."""
    opt.load('compiler_c')
    opt.add_option('--confcache', dest='confcache', default=0, action='count', help='Use a configuration cache')
def configure(conf):
    """waf configure step: locate a C compiler and verify that a trivial
    program compiles."""
    conf.load('compiler_c')
    conf.check(fragment='int main() { return 0; }')
|
[
"anton.feldmann@outlook.de"
] |
anton.feldmann@outlook.de
|
|
ca8707be7abdfa925b8ebdeabf5485b6943a3066
|
6545714ada44ce8a3bc3a55dfc9abb3ea9282c05
|
/code/figures/si/figS0X_histograms.py
|
cb07599ef7ab19959aacb74dddf6909977cae293
|
[
"MIT",
"CC-BY-4.0",
"CC-BY-3.0"
] |
permissive
|
RPGroup-PBoC/bursty_transcription
|
88b8e30f1fa05b2a57319aa73ab22c3e51f4cb3a
|
cd3082c567168dfad12c08621976ea49d6706f89
|
refs/heads/master
| 2023-02-08T07:19:13.599192
| 2020-12-15T22:27:44
| 2020-12-15T22:27:44
| 229,149,541
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,444
|
py
|
# %%
import enum
import re
import dill
from git import Repo #for directory convenience
import numpy as np
import pandas as pd
import emcee
import arviz as az
import matplotlib.pyplot as plt
import seaborn as sns
import bebi103.viz
import srep
srep.viz.plotting_style()
pboc_colors = srep.viz.color_selector('pboc')
# %%
# 4x3 grid: one row per aTc concentration, one column per operator.
fig, ax = plt.subplots(4, 3, figsize=(8.5, 10), sharex=False, sharey=False)
# # Modify tick font size
# for a in ax:
#     a.tick_params(axis="both", which="major", labelsize=8)
repo = Repo("./", search_parent_directories=True)
# repo_rootdir holds the absolute path to the top-level of our repo
repo_rootdir = repo.working_tree_dir
# Select PBoC color palette
colors = srep.viz.color_selector('pboc')
# Set PBoC plotting style
srep.viz.plotting_style()
# load in the pickled MCMC samples (model spec, sampler, posterior
# predictive draws for UV5 and the repressed strains)
pklfile = open(
    f"{repo_rootdir}/data/mcmc_samples/repression_pooled_expts.pkl", 'rb'
)
model, sampler, ppc_uv5, ppc_rep = dill.load(pklfile)
pklfile.close()
inf_dat = az.convert_to_inference_data(
    sampler, var_names=model.var_labels
)
data_uv5, data_rep = srep.utils.condense_data(model.expts)
n_dim = np.shape(model.var_labels)
# Define operators (one per figure column)
op_array = ["Oid", "O1", "O2"]
# Define aTc concentrations (one per figure row)
aTc_array = ["0p5ngmL", "1ngmL", "2ngmL", "10ngmL"]
# Set global colors for aTc concentrations
aTc_colors = ('blue', 'betancourt', 'green', 'orange')
aTc_col_dict = dict(zip(aTc_array , aTc_colors))
# organize all the options upfront: which (operator, aTc) experiments
# actually exist in the dataset; missing combinations are greyed out below.
all_expts = (
    ("Oid_2ngmL", "Oid_1ngmL"),
    ("O1_1ngmL", "O1_2ngmL", "O1_10ngmL"),
    ("O2_0p5ngmL", "O2_1ngmL", "O2_2ngmL", "O2_10ngmL")
)
# Loop through operator columns
for op_idx, op in enumerate(op_array):
    # List experiments available for this operator
    op_exp = all_expts[op_idx]
    # Loop through aTc concentration rows
    for aTc_idx, aTc in enumerate(aTc_array):
        # Define aTc concentration color
        col = aTc_col_dict[aTc]
        color = srep.viz.bebi103_colors()[col]
        # Define experiment
        expt = f"{op}_{aTc}"
        # Add operator label at the top of each column
        if aTc_idx == 0:
            label = f"operator {op}"
            ax[aTc_idx, op_idx].set_title(label, bbox=dict(facecolor="#ffedce"))
        # Add aTc concentration label to rightmost plots
        if op_idx == 2:
            # Generate twin axis
            axtwin = ax[aTc_idx, op_idx].twinx()
            # Remove ticks
            axtwin.get_yaxis().set_ticks([])
            # Fix label: "0p5ngmL" -> "0.5 ng/mL" etc.
            label = expt.split("_")[1]
            label = label.replace("ngmL", " ng/mL")
            label = label.replace("0p5", "0.5")
            # Set label
            axtwin.set_ylabel(
                f"[aTc] {label}",
                bbox=dict(facecolor="#ffedce"),
            )
            # Remove residual ticks from the original left axis
            ax[aTc_idx, op_idx].tick_params(color="w", width=0)
        # Add ylabel to left plots
        # if op_idx == 0:
        #     ax[aTc_idx, op_idx].set_ylabel("probability")
        # Check if experiment exists; if not, grey out the panel and skip it
        if expt not in op_exp:
            ax[aTc_idx, op_idx].set_facecolor("#D3D3D3")
            ax[aTc_idx, op_idx].tick_params(axis='x', colors='white')
            ax[aTc_idx, op_idx].tick_params(axis='y', colors='white')
            continue
        # Find experiment index
        expt_idx = model.expts.index(expt)
        # Extract PPC samples and unpack them to raw format
        ppc_samples = srep.utils.uncondense_ppc(ppc_rep[expt_idx])
        # Define bins in histogram (one bin per integer mRNA count)
        bins = np.arange(0, ppc_samples.max() + 1)
        # Initialize matrix to save histograms (one row per PPC draw)
        hist_mat = np.zeros([ppc_samples.shape[0], len(bins) - 1])
        # Loop through each ppc sample and compute its histogram
        for s_idx, s in enumerate(ppc_samples):
            hist_mat[s_idx] = np.histogram(s, bins=bins, density=True)[0]
        # Find per-bin percentiles across PPC draws
        lower_tile = np.percentile(hist_mat, 2.5, axis=0)
        upper_tile = np.percentile(hist_mat, 97.5, axis=0)
        mid_tile = np.percentile(hist_mat, 50, axis=0)
        # Extract observed data
        expt_data = srep.utils.uncondense_valuescounts(data_rep[expt_idx])
        # Compute histogram for the observed data
        hist_data = np.histogram(expt_data, bins=bins, density=True)[0]
        # Plot predicted histogram with percentiles
        # 95% credible band
        ax[aTc_idx, op_idx].fill_between(
            bins[:-1],
            lower_tile,
            upper_tile,
            step="post",
            edgecolor=color[0],
            color=color[0]
        )
        # median
        ax[aTc_idx, op_idx].step(
            bins[:-1],
            mid_tile,
            where="post",
            color=color[-1]
        )
        # add observed data on top
        ax[aTc_idx, op_idx].step(
            bins[:-1],
            hist_data,
            where="post",
            color="black",
            linewidth=1.25
        )
        # Set x-label
        ax[aTc_idx, op_idx].set_xlabel("mRNA / cell")
        ax[aTc_idx, op_idx].set_ylabel("probability")
        # Trim the x-axis to where the data density exceeds 5e-3
        upper_limit = np.where(hist_data > 5E-3)[0][-1]
        ax[aTc_idx, op_idx].set_xlim(0, upper_limit)
# Adjust spacing between plots
plt.subplots_adjust(hspace=0.3, wspace=0.4)
plt.savefig(
    f"{repo_rootdir}/figures/si/figS0X_histograms.pdf", bbox_inches='tight'
)
# %%
|
[
"manuel.razo.m@gmail.com"
] |
manuel.razo.m@gmail.com
|
93f6e5d8bb3b2467e9215ebface9f1f79610489f
|
18c1cbda3f9f6ca9cc9a27e93ddfece583c4fe43
|
/projects/DensePose/densepose/config.py
|
d4366b11a115d5d9673008dd2df9cdda193fda82
|
[
"Apache-2.0"
] |
permissive
|
zzzzzz0407/detectron2
|
0bd8e5def65eb72bc9477f08f8907958d9fd73a1
|
021fc5b1502bbba54e4714735736898803835ab0
|
refs/heads/master
| 2022-12-04T14:25:36.986566
| 2020-08-26T10:39:30
| 2020-08-26T10:39:30
| 276,800,695
| 1
| 0
|
Apache-2.0
| 2020-07-03T03:42:26
| 2020-07-03T03:42:25
| null |
UTF-8
|
Python
| false
| false
| 3,394
|
py
|
# -*- coding = utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from detectron2.config import CfgNode as CN
def add_dataset_category_config(cfg: CN):
    """
    Add config for additional category-related dataset options
    - category whitelisting
    - category mapping
    """
    _C = cfg
    # new_allowed=True lets per-dataset keys be added later without
    # pre-declaring them in this schema.
    _C.DATASETS.CATEGORY_MAPS = CN(new_allowed=True)
    _C.DATASETS.WHITELISTED_CATEGORIES = CN(new_allowed=True)
def add_densepose_config(cfg: CN):
    """
    Add config for densepose head.

    Registers the MODEL.ROI_DENSEPOSE_HEAD node (architecture, pooler,
    loss weights, decoder, DeepLab and confidence-estimation options)
    plus rotation-augmentation settings under INPUT and TEST.AUG.
    """
    _C = cfg
    _C.MODEL.DENSEPOSE_ON = True
    # --- head architecture ---
    _C.MODEL.ROI_DENSEPOSE_HEAD = CN()
    _C.MODEL.ROI_DENSEPOSE_HEAD.NAME = ""
    _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8
    # Number of parts used for point labels
    _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4
    _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512
    _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3
    _C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2
    _C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 112
    # --- RoI pooling ---
    _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2"
    _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 28
    _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2
    _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS = 2 # 15 or 2
    # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
    _C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7
    # Loss weights for annotation masks.(14 Parts)
    _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 5.0
    # Loss weights for surface parts. (24 Parts)
    _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 1.0
    # Loss weights for UV regression.
    _C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.01
    # Coarse segmentation is trained using instance segmentation task data
    _C.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS = False
    # For Decoder
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON = True
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = ""
    _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4
    # For DeepLab head
    _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB = CN()
    _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM = "GN"
    _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON = 0
    # Confidences
    # Enable learning UV confidences (variances) along with the actual values
    _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE = CN({"ENABLED": False})
    # UV confidence lower bound
    _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01
    # Enable learning segmentation confidences (variances) along with the actual values
    _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE = CN({"ENABLED": False})
    # Segmentation confidence lower bound
    _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON = 0.01
    # Statistical model type for confidence learning, possible values:
    # - "iid_iso": statistically independent identically distributed residuals
    #    with isotropic covariance
    # - "indep_aniso": statistically independent residuals with anisotropic
    #    covariances
    _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso"
    # List of angles for rotation in data augmentation during training
    _C.INPUT.ROTATION_ANGLES = [0]
    _C.TEST.AUG.ROTATION_ANGLES = () # Rotation TTA
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
2d208e527b45d814669dbc1dcaafd017efa974cf
|
2af6a5c2d33e2046a1d25ae9dd66d349d3833940
|
/res_bw/scripts/client/fx/events/setorbitorpoint.py
|
ce1f72d4a16ab75244a47fce7c26186c59c9120e
|
[] |
no_license
|
webiumsk/WOT-0.9.12-CT
|
e6c8b5bb106fad71b5c3056ada59fb1aebc5f2b2
|
2506e34bd6634ad500b6501f4ed4f04af3f43fa0
|
refs/heads/master
| 2021-01-10T01:38:38.080814
| 2015-11-11T00:08:04
| 2015-11-11T00:08:04
| 45,803,240
| 0
| 0
| null | null | null | null |
WINDOWS-1250
|
Python
| false
| false
| 1,334
|
py
|
# 2015.11.10 21:32:17 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/FX/Events/SetOrbitorPoint.py
from FX import s_sectionProcessors
from ParticleSubSystem import *
import Pixie
from bwdebug import *
class SetOrbitorPoint(ParticleSubSystem):
    """
    This class implements an event that sets the world location of an orbitor
    to the position of the Effect source when the effect is started.
    """

    def __init__(self):
        ParticleSubSystem.__init__(self)

    def isInteresting(self, subSystem):
        # Only sub-systems that actually carry an orbitor action are relevant.
        act = subSystem.action(ORBITOR_PSA)
        return act is not None

    def setOrbitorPoint(self, actor, source, target, subSystem):
        # Copy the effect source's world position onto the orbitor action.
        try:
            act = subSystem.action(ORBITOR_PSA)
            act.point = source.position
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed; the attribute lookup can fail when the
            # source object exposes no position.
            ERROR_MSG('setOrbitorPoint has a problem with finding the position of the source object', source)

    def go(self, effect, actor, source, target, **kargs):
        # Apply to every interesting sub-system; request no extra delay.
        self.subSystemIterate(actor, source, target, self.setOrbitorPoint)
        return 0.0


# Register this event type with the FX section-processor table.
s_sectionProcessors['SetOrbitorPoint'] = SetOrbitorPoint
# okay decompyling c:\Users\PC\wotsources\files\originals\res_bw\scripts\client\fx\events\setorbitorpoint.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.10 21:32:17 Střední Evropa (běžný čas)
|
[
"info@webium.sk"
] |
info@webium.sk
|
e13e7d92bfdb8255a0c0eccbbcacd0f29a374af3
|
999ed80db247794159be1d752bc6f0fc272bd117
|
/spytest/spytest/env.py
|
9daccb6af3427a3131e4466266a4203d71bb5e5e
|
[
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] |
permissive
|
ramakristipati/sonic-mgmt
|
7fee876412f0121da96d751f7d199690c73496f3
|
a86f0e5b1742d01b8d8a28a537f79bf608955695
|
refs/heads/master
| 2023-08-31T07:55:38.446663
| 2023-08-31T06:34:53
| 2023-08-31T06:34:53
| 315,448,103
| 2
| 0
|
NOASSERTION
| 2020-11-23T21:44:07
| 2020-11-23T21:44:07
| null |
UTF-8
|
Python
| false
| false
| 6,803
|
py
|
import os
# Highest testbed bucket size for which SPYTEST_TOPO_<n> entries are
# generated on demand (see _get_defaults below).
max_buckets = 32
# Built-in default values for SPYTEST_* environment variables.  `None`
# means "no default"; get() falls back to the caller-supplied default.
defaults = {
    "SPYTEST_ONIE_FAIL_ON_NORMAL_PROMPT": "1",
    "SPYTEST_LOGS_TIME_FMT_ELAPSED": "0",
    "SPYTEST_NO_CONSOLE_LOG": "0",
    "SPYTEST_PROMPTS_FILENAME": None,
    "SPYTEST_TEXTFSM_INDEX_FILENAME": None,
    "SPYTEST_UI_POSITIVE_CASES_ONLY": "0",
    "SPYTEST_REPEAT_MODULE_SUPPORT": "0",
    "SPYTEST_FILE_PREFIX": "results",
    "SPYTEST_RESULTS_PREFIX": None,
    "SPYTEST_RESULTS_PNG": "1",
    "SPYTEST_MODULE_CSV_FILENAME": "modules.csv",
    "SPYTEST_MODULE_INFO_CSV_FILENAME": "module_info.csv",
    "SPYTEST_FUNCTION_INFO_CSV_FILENAME": "function_info.csv",
    "SPYTEST_TCMAP_CSV_FILENAME": "tcmap.csv,tcmap-ut.csv",
    "SPYTEST_TESTBED_IGNORE_CONSTRAINTS": "",
    "SPYTEST_FLEX_DUT": "1",
    "SPYTEST_FLEX_PORT": "0",
    "SPYTEST_MGMT_IFNAME": "eth0",
    "SPYTEST_TOPO_SEP": None,
    "SPYTEST_TESTBED_RANDOMIZE_DEVICES": "0",
    # Explicit topologies for common bucket sizes; other sizes are
    # auto-generated as plain device lists by _get_defaults().
    "SPYTEST_TOPO_1": "D1T1:2",
    "SPYTEST_TOPO_2": "D1T1:4 D1D2:6 D2T1:2",
    "SPYTEST_TOPO_4": "D1T1:2 D2T1:2 D3T1:2 D4T1:2 D1D2:4 D2D3:4 D3D4:4 D4D1:4",
    "SPYTEST_TOPO_6": "D1D3:4 D1D4:4 D1D5:2 D1D6:4 D2D3:4 D2D4:4 D2D5:4 D2D6:4 D3T1:2 D4T1:2 D5T1:2 D6T1:2",
    "SPYTEST_EMAIL_BODY_PREFIX": "",
    "SPYTEST_TECH_SUPPORT_ONERROR": "system,port_list,port_status,console_hang,on_cr_recover",
    "SPYTEST_SAVE_CLI_TYPE": "1",
    "SPYTEST_SAVE_CLI_CMDS": "1",
    "SPYTEST_SHUTDOWN_FREE_PORTS": "0",
    "SPYTEST_ABORT_ON_VERSION_MISMATCH": "2",
    "SPYTEST_TOPOLOGY_STATUS_MAX_WAIT": "60",
    "SPYTEST_TOPOLOGY_STATUS_ONFAIL_ABORT": "module",
    "SPYTEST_LIVE_RESULTS": "1",
    "SPYTEST_DEBUG_FIND_PROMPT": "0",
    "SPYTEST_KDUMP_ENABLE": "0",
    "SPYTEST_LOG_DUTID_FMT": "LABEL",
    "SPYTEST_SYSRQ_ENABLE": "0",
    "SPYTEST_SET_STATIC_IP": "1",
    "SPYTEST_ONREBOOT_RENEW_MGMT_IP": "0",
    "SPYTEST_DATE_SYNC": "1",
    "SPYTEST_BOOT_FROM_GRUB": "0",
    "SPYTEST_RECOVERY_MECHANISMS": "1",
    "SPYTEST_RESET_CONSOLES": "1",
    "SPYTEST_ONCONSOLE_HANG": "recover",
    "SPYTEST_CONNECT_DEVICES_RETRY": "10",
    "SPYTEST_OPENCONFIG_API": "GNMI",
    "SPYTEST_IFA_ENABLE": "0",
    "SPYTEST_ROUTING_CONFIG_MODE": None,
    "SPYTEST_CLEAR_MGMT_INTERFACE": "0",
    "SPYTEST_CLEAR_DEVICE_METADATA_HOSTNAME": "0",
    "SPYTEST_CLEAR_DEVICE_METADATA_BGP_ASN": "0",
    "SPYTEST_NTP_CONFIG_INIT": "0",
    "SPYTEST_BASE_CONFIG_RETAIN_FDB_AGETIME": "0",
    "SPYTEST_GENERATE_CERTIFICATE": "0",
    "SPYTEST_HOOKS_SYSTEM_STATUS_UITYPE": "",
    "SPYTEST_HOOKS_PORT_ADMIN_STATE_UITYPE": "click",
    "SPYTEST_HOOKS_PORT_STATUS_UITYPE": "click",
    "SPYTEST_HOOKS_VERSION_UITYPE": "click",
    "SPYTEST_HOOKS_BREAKOUT_UITYPE": "klish",
    "SPYTEST_HOOKS_SPEED_UITYPE": "",
    "SPYTEST_IFNAME_MAP_UITYPE": "click",
    "SPYTEST_IFNAME_TYPE_UITYPE": "klish",
    "SPYTEST_API_INSTRUMENT_SUPPORT": "0",
    "SPYTEST_REDIS_DB_CLI_TYPE": "1",
    "SPYTEST_TOPOLOGY_SHOW_ALIAS": "0",
    "SPYTEST_TOPOLOGY_STATUS_FAST": "1",
    "SPYTEST_BGP_API_UITYPE": "",
    "SPYTEST_BGP_CFG_API_UITYPE": "",
    "SPYTEST_BGP_SHOW_API_UITYPE": "",
    "SPYTEST_RECOVERY_CTRL_C": "1",
    "SPYTEST_RECOVERY_CTRL_Q": "1",
    "SPYTEST_SOFT_TGEN_WAIT_MULTIPLIER": "2",
    "SPYTEST_SUDO_SHELL": "1",
    # CSV: normal, fast, rps
    "SPYTEST_SYSTEM_NREADY_RECOVERY_METHODS": "normal",
    "SPYTEST_DETECT_CONCURRENT_ACCESS": "1",
    "SPYTEST_SYSLOG_ANALYSIS": "1",
    "SPYTEST_USE_NO_MORE": "0",
    "SPYTEST_PRESERVE_GNMI_CERT": "1",
    "SPYTEST_CMD_FAIL_RESULT_SUPPORT": "1",
    "SPYTEST_USE_FULL_NODEID": "0",
    "SPYTEST_BATCH_DEFAULT_BUCKET": "1",
    "SPYTEST_BATCH_DEAD_NODE_MAX_TIME": "0",
    "SPYTEST_BATCH_POLL_STATUS_TIME": "0",
    "SPYTEST_BATCH_SAVE_FREE_DEVICES": "1",
    "SPYTEST_BATCH_TOPO_PREF": "0",
    "SPYTEST_TECH_SUPPORT_DELETE_ON_DUT": "0",
    "SPYTEST_SHOWTECH_MAXTIME": "1200",
    "SPYTEST_ABORT_ON_APPLY_BASE_CONFIG_FAIL": "1",
    "SPYTEST_TCMAP_DEFAULT_TRYSSH": "0",
    "SPYTEST_TCMAP_DEFAULT_FASTER_CLI": "0",
    "SPYTEST_RECOVERY_CR_FAIL": "0",
    "SPYTEST_RECOVER_FROM_ONIE_ON_REBOOT": "0",
    "SPYTEST_RECOVER_FROM_ONIE_WTIHOUT_IP": "1",
}
# Additional defaults used during development/debugging; same lookup
# semantics as `defaults` above.
dev_defaults = {
    "SPYTEST_TOPOLOGY_SIMULATE_FAIL": "0",
    "SPYTEST_REST_TEST_URL": None,
    "SPYTEST_BATCH_BACKUP_NODES": None,
    "SPYTEST_BATCH_RERUN_NODES": None,
    "SPYTEST_BATCH_MODULE_TOPO_PREF": None,
    "SPYTEST_BATCH_MATCHING_BUCKET_ORDER": "larger,largest",
    "SPYTEST_BATCH_RERUN": None,
    "SPYTEST_TESTBED_FILE": "testbed.yaml",
    "SPYTEST_FILE_MODE": "0",
    "SPYTEST_SCHEDULING": None,
    "SPYTEST_BATCH_RUN": None,
    "PYTEST_XDIST_WORKER": None,
    "SPYTEST_BUCKETS_DEADNODE_SIMULATE": "0",
    "SPYTEST_USER_ROOT": None,
    "SPYTEST_CMDLINE_ARGS": "",
    "SPYTEST_SUITE_ARGS": "",
    "SPYTEST_TEXTFSM_DUMP_INDENT_JSON": None,
    "SPYTEST_TESTBED_EXCLUDE_DEVICES": None,
    "SPYTEST_TESTBED_INCLUDE_DEVICES": None,
    "SPYTEST_LOGS_PATH": None,
    "SPYTEST_LOGS_LEVEL": "info",
    "SPYTEST_APPLY_BASE_CONFIG_AFTER_MODULE": "0",
    "SPYTEST_COMMUNITY_BUILD_FEATURES": "0",
    "SPYTEST_SYSTEM_READY_AFTER_PORT_SETTINGS": "0",
    "SPYTEST_TCLIST_FILE": None,
    "SPYTEST_MODULE_REPORT_SORTER": "CDT",
    "SPYTEST_ASAN_OPTIONS": "",
    "SPYTEST_RECOVER_INITIAL_SYSTEM_NOT_READY": "0",
    "SPYTEST_LIVE_TRACE_OUTPUT": "0",
    "SPYTEST_USE_SAMPLE_DATA": "0",
    "SPYTEST_DRYRUN_CMD_DELAY": "0",
    "SPYTEST_FASTER_CLI_OVERRIDE": None,
    "SPYTEST_FASTER_CLI_LAST_PROMPT": "1",
    "SPYTEST_NEW_FIND_PROMPT": "0",
    "SPYTEST_SPLIT_COMMAND_LIST": "0",
    "SPYTEST_CHECK_SKIP_ERROR": "0",
    "SPYTEST_HELPER_CONFIG_DB_RELOAD": "yes",
    "SPYTEST_CHECK_HELPER_SIGNATURE": "0",
    "SPYTEST_CLICK_HELPER_ARGS": "",
}
def _get_logs_path():
user_root = os.getenv("SPYTEST_USER_ROOT", os.getcwd())
logs_path = os.getenv("SPYTEST_LOGS_PATH", user_root)
if not os.path.isabs(logs_path):
logs_path = os.path.join(user_root, logs_path)
if not os.path.exists(logs_path):
os.makedirs(logs_path)
return logs_path
def _get_defaults():
    """Return the defaults table, lazily filling SPYTEST_TOPO_<n> entries.

    The entry for `max_buckets` acts as a sentinel: once present, all
    generated topologies are assumed to be in place.
    """
    sentinel = "SPYTEST_TOPO_{}".format(max_buckets)
    if sentinel not in defaults:
        for bucket in range(1, max_buckets + 1):
            key = "SPYTEST_TOPO_{}".format(bucket)
            if key not in defaults:
                defaults[key] = " ".join("D{}".format(d + 1) for d in range(bucket))
    return defaults
def get(name, default=None):
    """Return env var *name*, falling back to the built-in default table.

    Precedence: environment value, then *default* when the table has no
    (or a None) entry, then the table entry itself.
    """
    fallback = _get_defaults().get(name, default)
    if fallback is None:
        fallback = default
    return os.getenv(name, fallback)
def getint(name, default=0):
    """Return env var *name* converted to int, or *default* when unset/empty."""
    value = get(name)
    return int(value) if value else int(default)
def match(name, expected, default=None):
    """Return True when env var *name* (with *default*) equals *expected*."""
    return bool(get(name, default) == expected)
def get_default_all():
    # Return all (name, value) default pairs, sorted by variable name.
    return sorted(_get_defaults().items())
def set_default(name, value):
    # Override (or add) a built-in default at runtime.
    defaults[name] = value
|
[
"noreply@github.com"
] |
ramakristipati.noreply@github.com
|
7329bf28f44b2f6cbded2cd24158892a1a7d480d
|
36e12b65922ebbb6d95aff6cbac0777c47e24153
|
/getlongest3UTR.py
|
3bbdeb30cbe012570b84894898bf2fa0c6690df2
|
[
"MIT"
] |
permissive
|
NailouZhang/AnalysisScripts
|
d0d00174f642d6722cc907f9a392084600630780
|
3df37d2f8fca9bc402afe5ea870c42200fca1ed3
|
refs/heads/master
| 2023-06-06T08:14:39.064920
| 2021-06-22T16:46:26
| 2021-06-22T16:46:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,362
|
py
|
import argparse
import sys
from Bio import SeqIO
from Bio.SeqUtils import GC
from numpy import mean
def nameconversion(ens_to_short):
    """Build a mapping from Ensembl mouse gene IDs to gene short names.

    ens_to_short: path to a tab-delimited file whose first two columns are
    an ENSG ID and a short name.  Only lines whose first field starts with
    'ENSMUSG' are kept.  Returns {ENSMUSG... : gene_short_name}.
    """
    ens2short = {} # {ENSMUSG000000 : gene_short_name}
    # 'with' guarantees the handle is closed even on error.
    with open(ens_to_short, 'r') as infh:
        for line in infh:
            fields = line.strip().split('\t')
            if fields[0].startswith('ENSMUSG'):
                ens2short[fields[0]] = fields[1]
    return ens2short
def getlongestUTRs(UTRgff, ens2short):
    """Pick the longest 3'UTR per gene from a gff of UTRs.

    UTRgff: gff file (usually mm9_ensGene.3putrs.gff); the gene ID is the
    last ';'-separated token of column 9.  Genes without a short name in
    ens2short, or on chr*_random, are skipped.
    Returns {gene_short_name : [chrm, start, stop, strand]}.
    """
    UTRs = {} # {gene_short_name : [chrm, start, stop, strand]}
    with open(UTRgff, 'r') as infh:
        for line in infh:
            fields = line.strip().split('\t')
            chrm = fields[0]
            start = int(fields[3])
            stop = int(fields[4])
            strand = fields[6]
            gene = fields[8].split(';')[-1]
            if gene in ens2short and 'random' not in chrm:
                # Only keep genes with a short name that are not on chr_random.
                gene_short_name = ens2short[gene]
            else:
                continue
            length = stop - start
            if gene_short_name not in UTRs:
                UTRs[gene_short_name] = [chrm, start, stop, strand]
            else:
                # Keep whichever UTR spans more bases.
                currentlength = UTRs[gene_short_name][2] - UTRs[gene_short_name][1]
                if length > currentlength:
                    UTRs[gene_short_name] = [chrm, start, stop, strand]
    # print() form works under both Python 2 and 3 for a single argument.
    print('Have UTRs for {0} genes.'.format(len(UTRs)))
    return UTRs
def getsequences(UTRs, genomefasta, genes):
    """Extract 3'UTR sequences for the genes listed in *genes*.

    UTRs: {gene_short_name : [chrm, start, stop, strand]} (1-based, inclusive).
    genomefasta: genome in fasta format (indexed via Bio.SeqIO).
    genes: file with one gene short name per line.
    Returns {gene_short_name : UTR_sequence}; prints summary GC/length stats.
    """
    GCs = []
    lengths = []
    genesofinterest = []
    with open(genes, 'r') as infh:
        for line in infh:
            genesofinterest.append(line.strip())
    # Set for O(1) membership tests; the list is kept for the reported count.
    genesofinterest_set = set(genesofinterest)
    seqs = {} # {genename : UTR_sequence}
    sys.stderr.write('Indexing genome sequence...\n')
    seq_dict = SeqIO.to_dict(SeqIO.parse(genomefasta, 'fasta'))
    sys.stderr.write('{0} chromosomes indexed.\n'.format(len(seq_dict)))
    for UTR in UTRs:
        chrm, start, stop, strand = UTRs[UTR]
        # NOTE(review): a strand value other than '+'/'-' would leave UTRseq
        # unbound below, as in the original code -- assumed never to occur.
        if strand == '+':
            UTRseq = seq_dict[chrm].seq[start - 1 : stop].upper()
        elif strand == '-':
            UTRseq = seq_dict[chrm].seq[start - 1 : stop].upper().reverse_complement()
        if UTR in genesofinterest_set:
            seqs[UTR] = str(UTRseq)
            GCs.append(GC(str(UTRseq)))
            lengths.append(len(str(UTRseq)))
    print('Started with {0} genes. Found UTR sequences for {1} of them. Their average GC content is {2}%.'.format(len(genesofinterest), len(seqs), mean(GCs)))
    print('Their average length is {0}.'.format(mean(lengths)))
    # BUG FIX: removed stray 'outfh.close()' -- 'outfh' is never defined in
    # this function (it is opened later in __main__), so the original raised
    # NameError before returning.
    return seqs
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--ens2short', type = str, help = 'File of tab delimited ENSGENEIDs and gene short names.')
    parser.add_argument('--genes', type = str, help = 'List of genes for which you want the 3\' UTRs.')
    parser.add_argument('--UTRgff', type = str, help = '3\'UTR coordinates in gff format. mm9_ensGene.3putrs.gff, for example.')
    parser.add_argument('--genomefasta', type = str, help = 'Genome sequence in fasta format.')
    parser.add_argument('--output', type = str, help = 'Output file.')
    args = parser.parse_args()
    # Map ENSG IDs to short names, keep the longest UTR per gene, then pull
    # the corresponding sequences from the genome.
    ens2short = nameconversion(args.ens2short)
    UTRs = getlongestUTRs(args.UTRgff, ens2short)
    seqs = getsequences(UTRs, args.genomefasta, args.genes)
    # Write the selected UTR sequences as FASTA.
    outfh = open(args.output, 'w')
    for UTR in seqs:
        outfh.write('>' + UTR + '\n')
        outfh.write(seqs[UTR] + '\n')
    outfh.close()
|
[
"taliaferrojm@gmail.com"
] |
taliaferrojm@gmail.com
|
2a0bfa5609aca2dfc02a924a2145644180b42c3b
|
a08d85552ed0db1a906c3b31ed99f56bae857c60
|
/arguments.py
|
fb8b8bce971e780a8a1f147fee8b65881ca343c2
|
[] |
no_license
|
MagdalenaZZ/Python_ditties
|
90866e53f9aafa603f05735e2ceb094cf5518a18
|
757d8de1df0e53d38d4ba9854b092eabe6ec6570
|
refs/heads/master
| 2023-02-20T12:23:09.778092
| 2023-02-07T10:06:55
| 2023-02-07T10:06:55
| 136,293,051
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,753
|
py
|
#!/usr/bin/env python

"""
Command-line argument parsing for the OpEx pipeline wrapper.

NOTE(review): this script is broken as written -- it mixes the argparse
and optparse APIs and references several names that are never defined
(`ver`, `OptionParser`, `checkInputs`, `params`).  See the inline notes.
"""


import sys
import argparse

# Make sure you are working on the right version of Python
if sys.version_info[0] == 3:
    print ('\nCAVA does not run on Python 3.\n')
    quit()

# Command line argument parsing
parser = argparse.ArgumentParser(description='Process some integers.')
# NOTE(review): `ver` is undefined -- NameError at runtime.
descr = 'OpEx (Optimised Exome) pipeline ' + ver + '.'
# NOTE(review): `OptionParser` (optparse) is never imported, and this line
# discards the argparse parser created above -- one API should be chosen.
parser = OptionParser(usage='python opex.py <options>', version=ver, description=descr)
parser.add_option('-i', "--input", default=None, dest='fastq', action='store', help="fastq.gz files")
parser.add_option('-o', "--output", default=None, dest='name', action='store', help="Sample name (output prefix)")
parser.add_option('-b', "--bed", default=None, dest='bed', action='store', help="Bed file")
parser.add_option('-r', "--reference", default=None, dest='reference', action='store', help="Reference genome file")
parser.add_option('-t', "--threads", default=1, dest='threads', action='store', help="Number of processes to use")
parser.add_option('-f', "--full", default=False, dest='full', action='store_true',help="Output full CoverView output [default value: %default]")
parser.add_option('-c', "--config", default=None, dest='config', action='store', help="Configuration file")
parser.add_option('-k', "--keep", default=False, dest='keep', action='store_true', help="Keep temporary files")
# NOTE(review): optparse installs its own -h/--help; redefining it raises
# an OptionConflictError.
parser.add_option('-h', "--help", default=None, help="This is a help message")
(options, args) = parser.parse_args()
# NOTE(review): `checkInputs` is not defined or imported in this file.
checkInputs(options)
# NOTE(review): second parse, argparse-style, on an optparse object.
args = parser.parse_args()

# complain if something is missing
# NOTE(review): `params` is undefined; presumably a config dict loaded
# elsewhere -- confirm.  The message typo ("genom") is runtime text and is
# left untouched here.
if not 'REFERENCE' in params.keys():
    if options.reference is None:
        print ('Error: no reference genom provided.')
        quit()
|
[
"magz@MacBook-Air.local"
] |
magz@MacBook-Air.local
|
9b11c4878c83539c204e7440be80bbd367f71458
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_noisy1396.py
|
97706fc4f887aef70259b2716a81166bb629ea23
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,349
|
py
|
# qubit number=5
# total number=54
import cirq
import qiskit
from qiskit.providers.aer import QasmSimulator
from qiskit.test.mock import FakeVigo
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
    """Construct the phase oracle Zf for boolean function *f* on *n* qubits.

    Every computational-basis state |x> with f(x) == "1" receives a phase
    flip, implemented as X-conjugated multi-controlled U1(pi) so that the
    all-ones pattern triggers the flip.
    """
    controls = QuantumRegister(n, "ofc")
    oracle = QuantumCircuit(controls, name="Zf")
    for state in range(2 ** n):
        bits = np.binary_repr(state, n)
        if f(bits) != "1":
            continue
        # Qubits whose bit is 0 get X gates so the controlled phase fires
        # exactly on this state; the same X gates undo the mapping after.
        zero_positions = [q for q in range(n) if bits[q] == "0"]
        for q in zero_positions:
            oracle.x(controls[q])
        if n >= 2:
            oracle.mcu1(pi, controls[1:], controls[0])
        for q in zero_positions:
            oracle.x(controls[q])
    return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
    """Build a Grover-style search circuit over *n* qubits for oracle *f*.

    The gate sequence (and the "# number=..." tags) comes from a circuit
    generator and is preserved verbatim; the Zf oracle is applied
    floor(sqrt(2^n) * pi / 4) times, the standard Grover iteration count.
    All qubits are measured into the classical register at the end.
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    classical = ClassicalRegister(n, "qm")
    prog = QuantumCircuit(input_qubit, classical)
    # Initial superposition plus generator-inserted extra gates.
    prog.h(input_qubit[0]) # number=3
    prog.h(input_qubit[1]) # number=4
    prog.h(input_qubit[2]) # number=5
    prog.h(input_qubit[3]) # number=6
    prog.h(input_qubit[0])  # number=41
    prog.cz(input_qubit[1],input_qubit[0]) # number=42
    prog.h(input_qubit[0]) # number=43
    prog.z(input_qubit[1]) # number=37
    prog.cx(input_qubit[1],input_qubit[0]) # number=38
    prog.h(input_qubit[4]) # number=21
    prog.x(input_qubit[2]) # number=39
    Zf = build_oracle(n, f)
    # Standard Grover iteration count for a single marked item.
    repeat = floor(sqrt(2 ** n) * pi / 4)
    for i in range(repeat):
        prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
        # Diffusion-like block (generated; order is significant).
        prog.h(input_qubit[0]) # number=1
        prog.h(input_qubit[1]) # number=2
        prog.h(input_qubit[2]) # number=7
        prog.h(input_qubit[3]) # number=8
        prog.h(input_qubit[0]) # number=51
        prog.cz(input_qubit[3],input_qubit[0]) # number=52
        prog.h(input_qubit[0]) # number=53
        prog.h(input_qubit[0]) # number=48
        prog.cz(input_qubit[3],input_qubit[0]) # number=49
        prog.h(input_qubit[0]) # number=50
        prog.z(input_qubit[3]) # number=46
        prog.cx(input_qubit[3],input_qubit[0]) # number=47
        prog.x(input_qubit[4]) # number=40
        prog.cx(input_qubit[3],input_qubit[0]) # number=35
        prog.x(input_qubit[0]) # number=9
        prog.cx(input_qubit[0],input_qubit[1]) # number=29
        prog.x(input_qubit[1]) # number=30
        prog.cx(input_qubit[0],input_qubit[1]) # number=31
        prog.x(input_qubit[2]) # number=11
        prog.x(input_qubit[1]) # number=44
        prog.x(input_qubit[3]) # number=12
        if n>=2:
            prog.mcu1(pi,input_qubit[1:],input_qubit[0])
        prog.cx(input_qubit[1],input_qubit[0]) # number=24
        prog.x(input_qubit[0]) # number=25
        prog.cx(input_qubit[1],input_qubit[0]) # number=26
        prog.x(input_qubit[1]) # number=14
        prog.x(input_qubit[2]) # number=15
        prog.x(input_qubit[3]) # number=16
        prog.h(input_qubit[0]) # number=17
        prog.h(input_qubit[1]) # number=18
        prog.h(input_qubit[2]) # number=19
        prog.h(input_qubit[3]) # number=20
        prog.x(input_qubit[1]) # number=22
        prog.y(input_qubit[1]) # number=32
        prog.x(input_qubit[1]) # number=23
    # circuit end
    for i in range(n):
        prog.measure(input_qubit[i], classical[i])
    return prog
if __name__ == '__main__':
    # Search target: the all-zeros bitstring; f is its indicator function.
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5,f)
    # Run on the noisy FakeVigo mock backend.
    backend = FakeVigo()
    sample_shot =7924
    info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
    backend = FakeVigo()
    circuit1 = transpile(prog,backend,optimization_level=2)
    # Write the measurement counts, circuit depth and drawing to a report file.
    writefile = open("../data/startQiskit_noisy1396.csv","w")
    print(info,file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(),file=writefile)
    print(circuit1,file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
4adc810e449ce9f1cc6ddb43d8bdd3fa31a43e0a
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/deeva/generation.py
|
34c4783703c53c851a289ebe8e2be7391365a386
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,838
|
py
|
# Deeva - Character Generation Platform
# Copyright (C) 2018 Fabrizio Nunnari
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import List
from typing import Tuple
import pandas
class AttributesTable:
    """Loads and queries a CSV table of character attributes.

    Expected columns: id,name,type,min,max,labels -- for example:
        277,Cheeks_Mass,nc,0.2,0.8,N/A
    """

    def __init__(self, table_filename: str):
        frame = pandas.read_csv(filepath_or_buffer=table_filename)
        frame.set_index('id', inplace=True)
        self._table = frame

    def attributes_count(self) -> int:
        """Number of attributes in the table."""
        return len(self._table)

    def attribute_ids(self) -> List[int]:
        """All attribute ids, in file order."""
        return [int(idx) for idx in self._table.index]

    def attribute_names(self) -> List[str]:
        """All attribute names, in file order."""
        return list(self._table['name'])

    def attribute_name(self, attr_id: int) -> str:
        """Name of the attribute with id *attr_id*."""
        return self._table.loc[attr_id]['name']

    def attribute_range(self, attr_id: int) -> Tuple[float, float]:
        """(min, max) value range of the attribute with id *attr_id*."""
        row = self._table.loc[attr_id]
        return row['min'], row['max']
class IndividualsTable:
    """Loads and queries a CSV table of generated individuals.

    Expected columns: id,creation_type,has_content_files,<attr-id columns...>
    -- for example:
        35,rm,False,0.35,1.0,0.5,0.775
    """

    # Positional offset (after 'id' becomes the index) of the first
    # attribute column.
    FIRST_ATTRIBUTE_INDEX = 2

    def __init__(self, individuals_filename):
        frame = pandas.read_csv(filepath_or_buffer=individuals_filename)  # type: pandas.DataFrame
        frame.set_index('id', inplace=True)
        self._table = frame

    def count(self) -> int:
        """Number of individuals in the table."""
        return len(self._table)

    def ids(self) -> List[int]:
        """All individual ids, in file order."""
        return [int(idx) for idx in self._table.index]

    def attribute_ids(self) -> List[int]:
        """Attribute ids, taken from the numeric column headers."""
        attr_columns = self._table.columns.values[IndividualsTable.FIRST_ATTRIBUTE_INDEX:]
        return [int(c) for c in attr_columns.tolist()]

    def attribute_values(self, individual_id: int) -> List[float]:
        """Attribute values of one individual, as plain floats."""
        row = self._table.loc[individual_id]
        return [float(v) for v in row.iloc[IndividualsTable.FIRST_ATTRIBUTE_INDEX:]]
# Manual smoke test: loads sample attribute/individual tables from
# hard-coded relative paths and generates MB-Lab character JSON files.
# NOTE(review): paths assume a specific working directory -- confirm
# before running outside the original Blender project layout.
if __name__ == "__main__":
    print("Test attrs")
    import os
    from deeva.generation_tools import create_mblab_chars_json_dir
    print(os.getcwd())
    attributes_tab = AttributesTable("../../BlenderScenes/VS-1-testvarset1.csv")
    print(attributes_tab.attributes_count())
    print(attributes_tab.attribute_ids())
    print(attributes_tab.attribute_names())
    for a in attributes_tab.attribute_ids():
        print(attributes_tab.attribute_name(a))
        print(attributes_tab.attribute_range(a))
    print("")
    # create_random_individuals(attributes_table=attributes_tab, num_individuals=30, out_filename="individuals2.csv", random_segments=9)
    indiv_tab = IndividualsTable("../../BlenderScenes/individuals2-fake.csv")
    print(indiv_tab._table)
    create_mblab_chars_json_dir(individuals=indiv_tab, attributes=attributes_tab, dirpath="generated_indiv")
    print("end.")
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
435e46730f8c4d4c153b4262319dceef250fa076
|
5c94e032b2d43ac347f6383d0a8f0c03ec3a0485
|
/Push2/transport_state.py
|
3c64a9d2e39ef49634256f34ad79259e83fd83ec
|
[] |
no_license
|
Elton47/Ableton-MRS-10.1.13
|
997f99a51157bd2a2bd1d2dc303e76b45b1eb93d
|
54bb64ba5e6be52dd6b9f87678ee3462cc224c8a
|
refs/heads/master
| 2022-07-04T01:35:27.447979
| 2020-05-14T19:02:09
| 2020-05-14T19:02:09
| 263,990,585
| 0
| 0
| null | 2020-05-14T18:12:04
| 2020-05-14T18:12:03
| null |
UTF-8
|
Python
| false
| false
| 2,961
|
py
|
# uncompyle6 version 3.6.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 2.7.17 (default, Dec 23 2019, 21:25:33)
# [GCC 4.2.1 Compatible Apple LLVM 11.0.0 (clang-1100.0.33.16)]
# Embedded file name: /Users/versonator/Jenkins/live/output/Live/mac_64_static/Release/python-bundle/MIDI Remote Scripts/Push2/transport_state.py
# Compiled at: 2020-01-09 15:21:34
from __future__ import absolute_import, print_function, unicode_literals
from ableton.v2.base import listenable_property, listens
from ableton.v2.control_surface import Component
from .real_time_channel import RealTimeDataComponent
COUNT_IN_DURATION_IN_BARS = (0, 1, 2, 4)
class TransportState(Component):
    """Exposes Live transport state (playback, count-in, time signature)
    as listenable properties for the Push 2 surface.

    NOTE: decompiled code -- comments reconstruct intent from the visible
    wiring only.
    """
    # Count-in length in bars, derived from the song's count_in_duration
    # index via COUNT_IN_DURATION_IN_BARS.
    count_in_duration = listenable_property.managed(0)
    def __init__(self, song=None, *a, **kw):
        super(TransportState, self).__init__(*a, **kw)
        self._song = song
        # Wire the @listens slots below to the song / real-time component.
        self.__on_is_playing_changed.subject = song
        self._count_in_time_real_time_data = RealTimeDataComponent(parent=self, channel_type='count-in')
        self.__on_count_in_duration_changed.subject = song
        self.__on_is_counting_in_changed.subject = song
        self.__on_signature_numerator_changed.subject = song
        self.__on_signature_denominator_changed.subject = song
        self.__on_count_in_channel_changed.subject = self._count_in_time_real_time_data
        self._update_count_in_duration()
    @listenable_property
    def count_in_real_time_channel_id(self):
        # Channel id of the real-time count-in data stream.
        return self._count_in_time_real_time_data.channel_id
    @listenable_property
    def is_counting_in(self):
        return self._song.is_counting_in
    @listenable_property
    def signature_numerator(self):
        return self._song.signature_numerator
    @listenable_property
    def signature_denominator(self):
        return self._song.signature_denominator
    def _update_count_in_duration(self):
        # Translate the song's enum-like index into a bar count.
        self.count_in_duration = COUNT_IN_DURATION_IN_BARS[self._song.count_in_duration]
    @listens('count_in_duration')
    def __on_count_in_duration_changed(self):
        # While a count-in is running, keep showing the duration it started with.
        if not self.is_counting_in:
            self._update_count_in_duration()
    @listens('is_counting_in')
    def __on_is_counting_in_changed(self):
        # Feed the song into the real-time channel only during count-in.
        self._count_in_time_real_time_data.set_data(self._song if self.is_counting_in else None)
        self.notify_is_counting_in()
        self._update_count_in_duration()
        return
    @listens('signature_numerator')
    def __on_signature_numerator_changed(self):
        self.notify_signature_numerator()
    @listens('signature_denominator')
    def __on_signature_denominator_changed(self):
        self.notify_signature_denominator()
    @listenable_property
    def is_playing(self):
        return self._song.is_playing
    @listens('is_playing')
    def __on_is_playing_changed(self):
        self.notify_is_playing()
    @listens('channel_id')
    def __on_count_in_channel_changed(self):
        self.notify_count_in_real_time_channel_id()
|
[
"ahmed.emerah@icloud.com"
] |
ahmed.emerah@icloud.com
|
4521d6a7244f51fec11bcd32f5d1d1be2dcbf08e
|
cb3d1b072391b07ef0e9596df7f223f37683e970
|
/[0451]_Sort_Characters_By_Frequency/Sort_Characters_By_Frequency.py
|
c7229d1bbe77b54189179161cb4098bd1bbdf7ed
|
[] |
no_license
|
kotori233/LeetCode
|
99620255a64c898457901602de5db150bc35aabb
|
996f9fcd26326db9b8f49078d9454fffb908cafe
|
refs/heads/master
| 2021-09-10T18:00:56.968949
| 2018-03-30T14:38:27
| 2018-03-30T14:38:27
| 103,036,334
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 339
|
py
|
class Solution(object):
    def frequencySort(self, s):
        """Return *s* rearranged so characters appear in decreasing
        frequency order; equal-frequency characters keep their
        first-appearance order.

        :type s: str
        :rtype: str
        """
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        # sorted() is stable, so ties preserve insertion (first-appearance)
        # order, matching the original implementation.  str.join replaces
        # the original quadratic `res +=` string build.
        parts = [ch * cnt for ch, cnt in sorted(counts.items(), key=lambda kv: -kv[1])]
        return ''.join(parts)
|
[
"cycycy3333@163.com"
] |
cycycy3333@163.com
|
698dcbdbeec179200cec56754b3c09345c37f0c9
|
374b6fb00fe8b01a04964759ed5f7d97fc6f001f
|
/manage.py
|
038e35696d7fc80e53672839ace8de010809fbeb
|
[] |
no_license
|
Zoxon470/cleverbots
|
2f45730073955a8e5b8e569778305bbc9bb7af90
|
67db087f2d1e00976bd466155bc32e3815d7bdc8
|
refs/heads/master
| 2022-12-11T09:26:24.817876
| 2019-06-29T04:56:54
| 2019-06-29T04:56:54
| 183,275,969
| 1
| 0
| null | 2022-12-08T05:04:59
| 2019-04-24T17:26:47
|
Python
|
UTF-8
|
Python
| false
| false
| 830
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at the project settings before management commands run.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'config.settings.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # Distinguish "Django missing" from "Django present but its
        # management module failed to import for another reason".
        try:
            import django  # noqa: F401
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
    # This allows easy placement of apps within the interior
    # taxi_corp directory.
    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'backend'))
    execute_from_command_line(sys.argv)
|
[
"zoxon470@gmail.com"
] |
zoxon470@gmail.com
|
8ec6d8887b07d24192c626f26d8eaab0b8db1f3a
|
651a296c8f45b5799781fd78a6b5329effe702a0
|
/rnglib/cg_memory.py
|
4cdecab1c27aa25a499c94878f8eea4233a7a639
|
[] |
no_license
|
pdhhiep/Computation_using_Python
|
095d14370fe1a01a192d7e44fcc81a52655f652b
|
407ed29fddc267950e9860b8bbd1e038f0387c97
|
refs/heads/master
| 2021-05-29T12:35:12.630232
| 2015-06-27T01:05:17
| 2015-06-27T01:05:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,550
|
py
|
#!/usr/bin/env python
def cg_memory(i, g, cg1, cg2):
    """Store or retrieve the CG values for all generators.

    Licensing: distributed under the GNU LGPL license.
    Modified: 27 May 2013.  Author: John Burkardt.

    Parameters:

      i : the desired action --
          -1, get a value; 0, initialize all values; 1, set a value.
      g : for i = -1 or +1, the generator index, 1 <= g <= 32.
      cg1, cg2 : for i = +1 these are the new values to store; for
          i = -1 the inputs are ignored and the stored values are
          returned; for i = 0 they are ignored and zeros are returned.

    Returns the (possibly updated) pair (cg1, cg2).
    """
    from sys import exit

    g_max = 32

    if g < 1 or g_max < g:
        # print() with a single argument is valid in both Python 2 and 3
        # (fixes the original Python-2-only print statements).
        print('')
        print('CG_MEMORY - Fatal error!')
        print(' Input generator index G is out of bounds.')
        exit('CG_MEMORY - Fatal error!')

    if i < 0:
        # Get the stored values for generator g.
        cg1 = cg_memory.cg1_save[g - 1]
        cg2 = cg_memory.cg2_save[g - 1]
    elif i == 0:
        # Reset all generators.  (Uses a fresh loop variable instead of
        # the original's reuse of the parameter `i`.)
        for j in range(g_max):
            cg_memory.cg1_save[j] = 0
            cg_memory.cg2_save[j] = 0
        cg1 = 0
        cg2 = 0
    elif 0 < i:
        # Store the supplied values for generator g.
        cg_memory.cg1_save[g - 1] = cg1
        cg_memory.cg2_save[g - 1] = cg2

    return cg1, cg2

# Persistent per-generator storage, attached as function attributes so the
# values survive across calls (32 generators, all initially zero).
cg_memory.cg1_save = [0] * 32
cg_memory.cg2_save = [0] * 32
|
[
"siplukabir@gmail.com"
] |
siplukabir@gmail.com
|
9258465aa0d433f9acebe02ace571cd240004f9f
|
7bc54bae28eec4b735c05ac7bc40b1a8711bb381
|
/src/trainer_v2/per_project/transparency/splade_regression/runner/run_splade_regression_fit.py
|
4bd23e7ff5f343d8e1095a96d635abed5616e6d9
|
[] |
no_license
|
clover3/Chair
|
755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e
|
a2102ebf826a58efbc479181f1ebb5de21d1e49f
|
refs/heads/master
| 2023-07-20T17:29:42.414170
| 2023-07-18T21:12:46
| 2023-07-18T21:12:46
| 157,024,916
| 0
| 0
| null | 2023-02-16T05:20:37
| 2018-11-10T21:55:29
|
Python
|
UTF-8
|
Python
| false
| false
| 1,773
|
py
|
import logging
import sys
import tensorflow as tf
from transformers import AutoTokenizer
from misc_lib import path_join
from trainer_v2.custom_loop.train_loop_helper import get_strategy_from_config
from trainer_v2.per_project.transparency.splade_regression.data_loaders.dataset_factories import \
get_vector_regression_dataset
from trainer_v2.per_project.transparency.splade_regression.modeling.regression_modeling import get_transformer_sparse_encoder
from trainer_v2.train_util.arg_flags import flags_parser
from taskman_client.wrapper3 import report_run3
from trainer_v2.chair_logging import c_log
from trainer_v2.custom_loop.run_config2 import get_run_config2, RunConfig2
@report_run3
def main(args):
    """Fit a transformer sparse encoder on vector-regression data.

    Builds the run configuration from the parsed CLI args, constructs the
    training dataset under the distribution strategy's scope, and trains
    for one epoch of a fixed number of steps with a plain MSE objective.
    """
    c_log.info("Start {}".format(__file__))
    c_log.setLevel(logging.DEBUG)

    run_config: RunConfig2 = get_run_config2(args)
    run_config.print_info()
    strategy = get_strategy_from_config(run_config)

    model_config = {
        "model_type": "distilbert-base-uncased",
    }
    # The tokenizer is loaded only to learn the vocabulary size, which the
    # dataset factory needs to shape the sparse target vectors.
    tokenizer = AutoTokenizer.from_pretrained(model_config["model_type"])
    dataset_info = {
        "max_seq_length": 256,
        "max_vector_indices": 512,
        "vocab_size": tokenizer.vocab_size,
    }

    with strategy.scope():
        model = get_transformer_sparse_encoder(model_config, True)
        model.compile(loss="MSE", optimizer="adam")
        dataset = get_vector_regression_dataset(
            run_config.dataset_config.train_files_path,
            dataset_info, run_config, True)
        model.fit(dataset, epochs=1, steps_per_epoch=10000)


if __name__ == "__main__":
    args = flags_parser.parse_args(sys.argv[1:])
    main(args)
|
[
"lesterny@gmail.com"
] |
lesterny@gmail.com
|
28462c56603bddf4c7c0c0a3a53ba040c1d5cf98
|
288a00d2ab34cba6c389b8c2444455aee55a8a95
|
/tests/data23/recipe-519639.py
|
c34e06b386bf393e68d454f3b4c3206f3e367c16
|
[
"BSD-2-Clause"
] |
permissive
|
JohannesBuchner/pystrict3
|
ffd77b7bbc378bd4d8f21b5c6bd69a0d64a52ddb
|
18b0dd369082422f9bf0f89c72e7acb53a49849c
|
refs/heads/master
| 2023-08-14T06:37:37.954880
| 2023-07-13T11:16:38
| 2023-07-13T11:16:38
| 268,571,175
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,891
|
py
|
#!/usr/bin/env python
"""
True Lieberman-style delegation in Python.
Proxies are usually implemented as objects that forward method calls to a
"target" object. This approach has a major problem: forwarding makes the target
object the receiver of the method call; this means that calls originating from
the body of a method in the target will not go through the proxy (and thus their
behavior cannot be modified by the proxy).
For example, suppose we want a proxy to an instance of Target (shown below)
that is "safe", i.e., does not do anything bad like firing missiles. We can
just define a class that forwards calls to the safe methods, namely
send_flowers() and hang_out(). This class can have its own version of
fire_missiles() that does nothing. Now consider what happens when we call
the proxy object's innocent-looking hang_out() method. The call is forwarded
to the target object, which in turn calls the target object's (not the
proxy's) fire_missiles() method, and BOOM! (The proxy's version of
fire_missiles() is not called because forwarding has made the target object
the receiver of the new method call.)
By using delegation, one can implement proxies without the drawbacks of the
method-forwarding approach. This recipe shows how Python's __getattr__
method can be used to implement the kind of delegation present in
prototype-based languages like Self and Javascript, and how delegation can
be used to implement better proxies.
"""
__authors__ = ('Alessandro Warth <awarth@cs.ucla.edu>',
'Martin Blais <blais@furius.ca>',)
class Target(object):
    """A sample object whose methods include one dangerous action
    (fire_missiles) and one method (hang_out) that calls it internally."""

    def __init__(self, n):
        self.n = n

    def send_flowers(self):
        print('Sending %d flowers from %s' % (self.n, self))

    def fire_missiles(self):
        print('Firing %d missiles! from %s' % (self.n, self))

    def hang_out(self):
        # Oops! This is not as innocent as it looks!
        print('Hang out... not so innocently.')
        self.fire_missiles()

t = Target(17)

"""
Given 't', can we make a proxy to it that avoids firing missiles?
"""

from types import MethodType

class Proxy(object):
    """Delegating (Lieberman-style) proxy.

    Unlike a forwarding proxy, methods fetched from the target are rebound
    so that `self` inside them is the *proxy*.  Calls the target's methods
    make on `self` therefore resolve through the proxy again, so subclass
    overrides (e.g. a no-op fire_missiles) take effect even for calls that
    originate inside the target's own method bodies.
    """

    def __init__(self, target):
        self._target = target

    def __getattr__(self, aname):
        target = self._target
        f = getattr(target, aname)
        if isinstance(f, MethodType):
            # Rebind the underlying function to this proxy.  The original
            # recipe used the Python-2-only `new.instancemethod`; in
            # Python 3, types.MethodType(func, instance) creates the bound
            # method directly.
            return MethodType(f.__func__, self)
        else:
            return f

class SafeProxy(Proxy):
    "Override dangerous methods of the target."
    def fire_missiles(self):
        pass

print('--------')
p = SafeProxy(t)
p.send_flowers()
p.hang_out()

class SafeProxy2(Proxy):
    "Override more methods, wrapping two proxies deep."
    def send_flowers(self):
        print('Sending MORE and MORE flowers: %s' % self.n)

print('--------')
p2 = SafeProxy2(p)
p2.send_flowers()
|
[
"johannes.buchner.acad@gmx.com"
] |
johannes.buchner.acad@gmx.com
|
f883cbe229b7a5431271f2695882c1f0a45dc9a1
|
71cac038fabbc61602dbafb3ecddefbe132362aa
|
/Mining/Executables/package_integrity_test.py
|
714ae7813f2e6a41d54d501a18f28fb59439a762
|
[
"MIT"
] |
permissive
|
AdamSwenson/TwitterProject
|
00a38f18d6c5c6146f8ff21917da456c87c5453d
|
8c5dc7a57eac611b555058736d609f2f204cb836
|
refs/heads/master
| 2022-12-14T07:06:43.958429
| 2019-10-02T01:22:22
| 2019-10-02T01:22:22
| 138,349,359
| 0
| 0
|
MIT
| 2022-11-22T02:58:09
| 2018-06-22T21:23:31
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 342
|
py
|
"""
Created by adam on 6/22/18
"""
__author__ = 'adam'
if __name__ == '__main__':
import environment
from Mining.AccessManagement import TwitterLogin
from Mining.UserQueries import UserFinder
from CommonTools.FileTools import CsvFileTools
from CommonTools.Loggers import SlackNotifications
print('I like stuff ')
|
[
"adam.swenson@csun.edu"
] |
adam.swenson@csun.edu
|
10e2ad332a4ab24121f1f3de9d8d610a87ad6421
|
6b2a8dd202fdce77c971c412717e305e1caaac51
|
/solutions_5648941810974720_1/Python/lbj/A.py
|
fc68beb5e6f1eb1db1d76e181fd7a1244032b118
|
[] |
no_license
|
alexandraback/datacollection
|
0bc67a9ace00abbc843f4912562f3a064992e0e9
|
076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf
|
refs/heads/master
| 2021-01-24T18:27:24.417992
| 2017-05-23T09:23:38
| 2017-05-23T09:23:38
| 84,313,442
| 2
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,008
|
py
|
from collections import Counter
from sys import stderr
def sf(vals):
    """Yield each digit d (0-9) exactly vals[d] times, in ascending order."""
    for digit in range(10):
        for _ in range(vals[digit]):
            yield digit


def solve(s):
    """Recover the digits from a scrambled concatenation of digit names.

    Gauss-Jordan-style elimination on letter counts: Z, W, U, X, G occur in
    exactly one digit name each (ZERO, TWO, FOUR, SIX, EIGHT), and the rest
    of the digits follow by subtraction in dependency order.  Returns the
    digits in non-decreasing order as a string.
    """
    cnt = Counter(s)
    vals = [0] * 10
    # Letters unique to a single digit name.
    vals[0] = cnt['Z']
    vals[2] = cnt['W']
    vals[4] = cnt['U']
    vals[6] = cnt['X']
    vals[8] = cnt['G']
    # Shared letters, resolved once the unique ones are known.
    vals[5] = cnt['F'] - vals[4]
    vals[9] = cnt['I'] - vals[5] - vals[6] - vals[8]
    vals[7] = cnt['V'] - vals[5]
    vals[3] = cnt['H'] - vals[8]
    vals[1] = cnt['O'] - vals[0] - vals[2] - vals[4]
    # Sanity checking: the solution must reproduce the H and E counts.
    assert vals[3] + vals[8] == cnt['H']
    assert vals[0] + vals[1] + vals[3] + vals[3] + vals[5] + vals[7] + vals[7] + vals[8] + vals[9] == cnt['E'], "Expected %d, got %d: %r %r" % (cnt['E'], vals[0] + vals[1] + vals[3] + vals[3] + vals[5] + vals[7] + vals[7] + vals[8] + vals[9], cnt, vals)
    return ''.join(map(str, sf(vals)))


def main():
    # Pure Python 3 I/O.  The original mixed input() with the Python-2-only
    # raw_input(), so it could not run unmodified on either major version.
    t = int(input())
    for cn in range(t):
        print("Case #%d: %s" % (cn + 1, solve(input())))


if __name__ == '__main__':
    main()
|
[
"alexandra1.back@gmail.com"
] |
alexandra1.back@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.