hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
899a7241af6e0b8cdd621ac196abec7ab6dcaa11 | 1,085 | py | Python | tests/test_post.py | DerrickOdhiambo/Flask-IP3 | 9861d49152bced9d8ddda7783628451e5af10323 | [
"MIT"
] | null | null | null | tests/test_post.py | DerrickOdhiambo/Flask-IP3 | 9861d49152bced9d8ddda7783628451e5af10323 | [
"MIT"
] | null | null | null | tests/test_post.py | DerrickOdhiambo/Flask-IP3 | 9861d49152bced9d8ddda7783628451e5af10323 | [
"MIT"
] | null | null | null | import unittest
from app.models import User,Post,Comments
from app import db
class PostTest(unittest.TestCase):
    """Unit tests for the Post and Comments models."""

    def setUp(self):
        """Create a post, a user and a comment before every test."""
        self.post = Post(category='Product', content='Yes we can!')
        self.user_Derrick = User(username='Derrick', password='password', email='derrick@mail.com')
        self.new_comment = Comments(text='This is good', user=self.user_Derrick)

    def tearDown(self):
        """Delete every record a test may have persisted."""
        Comments.query.delete()
        Post.query.delete()
        User.query.delete()

    def test_instance(self):
        """The fixture object is an instance of Post."""
        self.assertTrue(isinstance(self.post, Post))

    def test_check_instance_variables(self):
        """Model attributes keep the values they were created with."""
        # assertEquals is a deprecated alias; assertEqual is the supported API.
        self.assertEqual(self.post.category, 'Product')
        self.assertEqual(self.post.content, 'Yes we can!')
        self.assertEqual(self.new_comment.text, 'This is good')
        self.assertEqual(self.new_comment.user, self.user_Derrick)

    def test_save_comment(self):
        """save_comment() persists the comment to the database."""
        self.new_comment.save_comment()
        self.assertTrue(len(Comments.query.all()) > 0)
| 31.911765 | 104 | 0.669124 | 1,005 | 0.926267 | 0 | 0 | 0 | 0 | 0 | 0 | 178 | 0.164055 |
899b5c413125fdcd4f43aed55ea491b6d8e91b33 | 4,335 | py | Python | ivi/agilent/agilent34410A.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 161 | 2015-01-23T17:43:01.000Z | 2022-03-29T14:42:42.000Z | ivi/agilent/agilent34410A.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 45 | 2015-01-15T13:35:04.000Z | 2021-06-03T01:58:55.000Z | ivi/agilent/agilent34410A.py | sacherjj/python-ivi | 6dd1ba93d65dc30a652a3a1b34c66921d94315e8 | [
"MIT"
] | 87 | 2015-01-31T10:55:23.000Z | 2022-03-17T08:18:47.000Z | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import time
import struct
from .. import ivi
from .. import dmm
from .. import scpi
class agilent34410A(scpi.dmm.Base):
    "Agilent 34410A IVI DMM driver"

    def __init__(self, *args, **kwargs):
        self.__dict__.setdefault('_instrument_id', '34410A')

        super(agilent34410A, self).__init__(*args, **kwargs)

        # Number of instrument state slots available via *SAV / *RCL.
        self._memory_size = 5

        self._identity_description = "Agilent 34410A/11A IVI DMM driver"
        self._identity_identifier = ""
        self._identity_revision = ""
        self._identity_vendor = ""
        self._identity_instrument_manufacturer = "Agilent Technologies"
        self._identity_instrument_model = ""
        self._identity_instrument_firmware_revision = ""
        self._identity_specification_major_version = 4
        self._identity_specification_minor_version = 1
        self._identity_supported_instrument_models = ['34410A', '34411A']

        # Expose the memory helpers through the ivi dynamic method interface.
        self._add_method('memory.save',
                        self._memory_save)
        self._add_method('memory.recall',
                        self._memory_recall)
        self._add_method('memory.set_name',
                        self._set_memory_name)
        self._add_method('memory.get_name',
                        self._get_memory_name)

    def _initialize(self, resource = None, id_query = False, reset = False, **keywargs):
        "Opens an I/O session to the instrument."

        super(agilent34410A, self)._initialize(resource, id_query, reset, **keywargs)

        # interface clear
        if not self._driver_operation_simulate:
            self._clear()

        # check ID
        if id_query and not self._driver_operation_simulate:
            instr_model = self.identity.instrument_model
            id_check = self._instrument_id
            id_short = instr_model[:len(id_check)]
            if id_short != id_check:
                # %-format the message; passing extra args to Exception only
                # stores them as a tuple and never substitutes them.
                raise Exception("Instrument ID mismatch, expecting %s, got %s" % (id_check, id_short))

        # reset
        if reset:
            self.utility.reset()

    def _memory_save(self, index):
        "Save the current instrument state to memory slot *index* (1-based)."
        index = int(index)
        if index < 1 or index > self._memory_size:
            # Use the exception from the imported ivi module; the bare name
            # OutOfRangeException is not defined in this file.
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*sav %d" % index)

    def _memory_recall(self, index):
        "Recall the instrument state stored in memory slot *index* (1-based)."
        index = int(index)
        if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("*rcl %d" % index)

    def _get_memory_name(self, index):
        "Return the user-assigned name of memory slot *index*."
        index = int(index)
        if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            return self._ask("memory:state:name? %d" % index).strip(' "')

    def _set_memory_name(self, index, value):
        "Assign *value* as the name of memory slot *index*."
        index = int(index)
        value = str(value)
        if index < 1 or index > self._memory_size:
            raise ivi.OutOfRangeException()
        if not self._driver_operation_simulate:
            self._write("memory:state:name %d, \"%s\"" % (index, value))
| 35.532787 | 99 | 0.649135 | 3,102 | 0.715571 | 0 | 0 | 0 | 0 | 0 | 0 | 1,518 | 0.350173 |
899b81f532a64949fdb793eda26c5264788f0f7f | 2,650 | py | Python | python/add_two_numbers.py | babibo180918/leetcode | d9f414beef4bf6d26ec11d2dd925086fd719ab4b | [
"MIT"
] | null | null | null | python/add_two_numbers.py | babibo180918/leetcode | d9f414beef4bf6d26ec11d2dd925086fd719ab4b | [
"MIT"
] | null | null | null | python/add_two_numbers.py | babibo180918/leetcode | d9f414beef4bf6d26ec11d2dd925086fd719ab4b | [
"MIT"
] | null | null | null | # Definition for singly-linked list.
class ListNode(object):
    """A singly-linked-list node: ``val`` holds the value, ``next`` the
    following node (None at the tail)."""
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution(object):
    def addTwoNumbers(self, l1, l2):
        """Add two non-negative integers stored as linked lists.

        Each node holds one digit, least-significant digit first (as the
        test cases below exercise). The sum is returned in the same
        representation.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        # NOTE(fix): the previous version referenced an undefined name
        # ``NodeList`` and never reduced ``val`` to a single digit, so the
        # carry propagated the entire running sum into every node.
        dummy = ListNode(0)
        tail = dummy
        carry = 0
        while l1 is not None or l2 is not None or carry:
            total = carry
            if l1 is not None:
                total += l1.val
                l1 = l1.next
            if l2 is not None:
                total += l2.val
                l2 = l2.next
            # Keep one digit per node; push the rest into the carry.
            carry, digit = divmod(total, 10)
            tail.next = ListNode(digit)
            tail = tail.next
        # Both inputs empty: still return a single zero node, as before.
        return dummy.next if dummy.next is not None else ListNode(0)
def printList(head):
    """Print the list's values on one line, space separated, then a newline."""
    node = head
    while node:
        print(node.val, end=' ')
        node = node.next
    print()
def makeList(l):
    """Build a linked list from the Python list *l*; return its head (None if empty)."""
    if not l:
        return None
    head = ListNode(l[0])
    tail = head
    for value in l[1:]:
        tail.next = ListNode(value)
        tail = tail.next
    return head
# Exercise Solution.addTwoNumbers on hand-built linked lists, printing the
# expected list next to the computed one for visual comparison.
sol = Solution()

# (first addend, second addend, expected sum), digits least-significant first.
cases = [
    ([1, 2, 3], [4, 5, 6], [5, 7, 9]),
    ([7, 2, 1], [4, 5, 6], [1, 8, 7]),
    ([2, 2, 9], [4, 5, 6], [6, 7, 5, 1]),
    ([0], [0], [0]),
    ([1, 3, 4], [5, 2], [6, 5, 4]),
    ([1, 3, 4], [5, 2], [6, 5, 4]),
    ([1, 3, 4], [5, 9], [6, 2, 5]),
    ([1, 3, 0, 4], [2, 3, 0, 1], [3, 6, 0, 5]),
]

for first_vals, second_vals, expected_vals in cases:
    one = makeList(first_vals)
    two = makeList(second_vals)
    expect = makeList(expected_vals)
    result = sol.addTwoNumbers(one, two)
    print('Expect:')
    printList(expect)
    print('Result:')
    printList(result)
| 20.703125 | 54 | 0.58717 | 854 | 0.322264 | 0 | 0 | 0 | 0 | 0 | 0 | 277 | 0.104528 |
899c26a988f10b6faf552e41fc9a11aea4981d87 | 3,313 | py | Python | ConvertCase.py | GuilhermeSenna/Streamlit-Apps | b44dee1369886f2db79404b7ac03c488d2fd0ffd | [
"MIT"
] | null | null | null | ConvertCase.py | GuilhermeSenna/Streamlit-Apps | b44dee1369886f2db79404b7ac03c488d2fd0ffd | [
"MIT"
] | null | null | null | ConvertCase.py | GuilhermeSenna/Streamlit-Apps | b44dee1369886f2db79404b7ac03c488d2fd0ffd | [
"MIT"
] | null | null | null | import SessionState
import base64
import streamlit as st
import pyperclip
# Persistent per-session state (SessionState is a Streamlit workaround):
#   input  - the text currently typed into the text_area widget
#   output - the converted text to show back in the text_area
#   key    - the text_area widget key; incremented to force a full reset
# This storage is needed because the text_area is rendered before the
# buttons that transform its content.
session_state = SessionState.get(input='', output='', key=0)
def download_link(object_to_download, download_filename, download_link_text):
    """Build an HTML anchor that downloads *object_to_download* as a file.

    Args:
        object_to_download: Text content to embed in the link.
        download_filename: File name suggested to the browser.
        download_link_text: Visible text of the link.

    Returns:
        An ``<a>`` tag whose href inlines the content as a base64 data URI.
    """
    # Base64-encode the payload so it can travel inside the href attribute.
    b64 = base64.b64encode(object_to_download.encode()).decode()
    return f'<a href="data:file/txt;base64,{b64}" download="{download_filename}">{download_link_text}</a>'
def myfunc(word):
    """Alternate character case: even indices lowered, odd indices uppered."""
    chars = []
    for index, char in enumerate(word):
        chars.append(char.upper() if index % 2 else char.lower())
    return "".join(chars)
def main():
    """Render the Streamlit UI: a text area plus case-conversion buttons.

    Streamlit re-runs this function top-to-bottom on every interaction, so
    each ``button(...)`` call returns True only on the run triggered by its
    click; the converted text survives runs via ``session_state``.
    """
    st.title('Text case converter')

    # The text Area will appear here.
    # This placeholder demarcates where it will be rendered, so the buttons
    # below can still update its content later in the same run.
    area = st.empty()

    # First button row: the six case conversions.
    col_A_1, col_A_2, col_A_3, col_A_4, col_A_5, col_A_6 = st.beta_columns(6)

    if col_A_1.button('lower case'):
        session_state.output = session_state.input.lower()
        st.success('Lower case applied')

    if col_A_2.button('UPPER CASE'):
        session_state.output = session_state.input.upper()
        st.success('Upper case applied')

    if col_A_3.button('Sentence case'):
        # Capitalize the first letter of each '. '-separated sentence.
        session_state.output = '. '.join(i.capitalize() for i in session_state.input.split('. '))
        st.success('Sentence case applied')

    if col_A_4.button('Capitalize Case'):
        session_state.output = session_state.input.title()
        st.success('Capitalize case applied')

    if col_A_5.button('aLtErNaTiNg cAsE'):
        session_state.output = myfunc(session_state.input)
        st.success('Alternating case applied')

    if col_A_6.button('InVeRsE CaSe'):
        # Flip the case of every character individually.
        session_state.output = ''.join(c.lower() if c.isupper() else c.upper() for c in session_state.input)
        st.success('Inverse case applied')

    st.markdown('---')

    # Second button row: export / clipboard / reset actions.
    col_B_1, col_B_2, col_B_3 = st.beta_columns(3)

    if col_B_1.button('Download text'):
        tmp_download_link = download_link(session_state.output, 'text.txt', 'Click here to download your text!')
        st.markdown(tmp_download_link, unsafe_allow_html=True)

    if col_B_2.button('Copy to clipboard'):
        # NOTE(review): pyperclip copies on the machine running the server,
        # not the browser — confirm this is the intended deployment.
        pyperclip.copy(session_state.output)
        st.success('Text copied to clipboard')

    if col_B_3.button('Clear'):
        # If the user has not pressed any button, there is no output yet, so
        # incrementing the text_area key forces the widget to reset fully.
        session_state.key += 1

        # If a button has been pressed, clearing the output clears the
        # text_area content as well.
        session_state.output = ''
        st.success('Successfully cleared text area')

    # The text area is declared here but appears above the buttons (via the
    # placeholder), so button clicks earlier in this run can change its
    # displayed content at run time.
    session_state.input = area.text_area('Enter text here to be converted', session_state.output,
                                         key=session_state.key)
# Streamlit executes this script top-to-bottom; build the UI when run directly.
if __name__ == '__main__':
    main()
899e05d154700b28dc0f00552d97ba1b87816851 | 3,474 | py | Python | biblio/my_secondary_verifications.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | biblio/my_secondary_verifications.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | biblio/my_secondary_verifications.py | lokal-profil/isfdb_site | 0ce20d6347849926d4eda961ea9249c31519eea5 | [
"BSD-3-Clause"
] | null | null | null | #!_PYTHONLOC
#
# (C) COPYRIGHT 2020-2021 Ahasuerus
# ALL RIGHTS RESERVED
#
# The copyright notice above does not evidence any actual or
# intended publication of such source code.
#
# Version: $Revision: 571 $
# Date: $Date: 2020-11-19 15:53:08 -0500 (Thu, 19 Nov 2020) $
from isfdb import *
from common import *
from login import *
from SQLparsing import *
# CGI entry point (Python 2): render the logged-in user's secondary
# verifications as an HTML table, 200 rows per page.
if __name__ == '__main__':

        # Pagination offset, passed as the first CGI parameter (defaults to 0).
        start = SESSION.Parameter(0, 'int', 0)
        PrintHeader('My Secondary Verifications')
        PrintNavbar('my_secondary_verifications', 0, 0, 'my_secondary_verifications.cgi', 0)

        # Require an authenticated user before showing anything.
        user = User()
        user.load()
        if not user.id:
                print '<h3>You must be logged in to view your secondary verifications</h3>'
                PrintTrailer('my_secondary_verifications', 0, 0)
                sys.exit(0)

        per_page = 200
        # First select 200 verification IDs -- needs to be done as a separate query since the SQL optimizer
        # in MySQL 5.0 is not always smart enough to use all available indices for multi-table queries
        query = """select verification.* from verification
                where ver_status = 1
                and user_id = %d
                order by ver_time desc
                limit %d, %d""" % (int(user.id), start, per_page)
        db.query(query)
        result0 = db.store_result()
        if result0.num_rows() == 0:
                print '<h3>No verifications present</h3>'
                PrintTrailer('recentver', 0, 0)
                sys.exit(0)
        # Materialize the page of rows so per-row queries can be issued below.
        ver = result0.fetch_row()
        ver_set = []
        while ver:
                ver_set.append(ver[0])
                ver = result0.fetch_row()

        print '<table cellpadding=3 class="generic_table">'
        print '<tr class="generic_table_header">'
        print '<th>#</th>'
        print '<th>Publication Title</th>'
        print '<th>Reference</th>'
        print '<th>Time</th>'
        print '</tr>'
        # 'color' toggles the row shading class; 'count' is the 1-based row
        # number continued across pages.
        color = 0
        count = start
        for ver in ver_set:
                pub_id = ver[VERIF_PUB_ID]
                verifier_id = ver[VERIF_USER_ID]
                verification_id = ver[VERIF_REF_ID]
                verification_time = ver[VERIF_TIME]
                # Look up the reference label and publication title for display.
                query = """select r.reference_label, p.pub_title
                        from reference r, pubs p
                        where r.reference_id = %d
                        and p.pub_id = %d""" % (verification_id, pub_id)
                db.query(query)
                result = db.store_result()
                record = result.fetch_row()
                color = color ^ 1
                while record:
                        count += 1
                        reference_name = record[0][0]
                        pub_title = record[0][1]
                        if color:
                                print '<tr align=left class="table1">'
                        else:
                                print '<tr align=left class="table2">'
                        print '<td>%d</td>' % count
                        print '<td>%s</td>' % ISFDBLink('pl.cgi', pub_id, pub_title)
                        print '<td>%s</td>' % reference_name
                        print '<td>%s</td>' % verification_time
                        print '</tr>'
                        record = result.fetch_row()
        print '</table>'
        # Offer a MORE link when the page was (likely) full.
        if result0.num_rows() > (per_page - 1):
                print '<p> [%s]' % ISFDBLink('my_secondary_verifications.cgi', start + per_page, 'MORE')
        PrintTrailer('my_secondary_verifications', 0, 0)
| 35.44898 | 107 | 0.531952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,486 | 0.427749 |
899e62389d7e3e73d0a9cac9e6a927bac137f515 | 1,523 | py | Python | extended_uva_judge/utilities.py | fritogotlayed/Extended-UVA-Judge | 7f4b04052429374d90597757c5bfb2a0a5bfe2ba | [
"MIT"
] | null | null | null | extended_uva_judge/utilities.py | fritogotlayed/Extended-UVA-Judge | 7f4b04052429374d90597757c5bfb2a0a5bfe2ba | [
"MIT"
] | null | null | null | extended_uva_judge/utilities.py | fritogotlayed/Extended-UVA-Judge | 7f4b04052429374d90597757c5bfb2a0a5bfe2ba | [
"MIT"
] | null | null | null | import os
import yaml
from extended_uva_judge import errors
def get_problem_directory(app_config):
    """Gets the directory containing the problem configs.

    :return: The path to the problem configs.
    :rtype: str
    """
    directory = app_config['problem_directory']

    if not directory:
        raise errors.MissingConfigEntryError('problem_directory')

    # Absolute when it starts with '/' (*nix) or contains ':' (Windows drive);
    # otherwise treat it as relative to the current working directory.
    is_absolute = directory.startswith('/') or ':' in directory
    if not is_absolute:
        directory = os.path.join(os.getcwd(), directory)

    return directory
def get_problem_config(app_config, problem_id):
    """Gets the configuration for this objects corresponding problem.

    :param app_config: Application config dict containing 'problem_directory'.
    :param problem_id: Identifier of the problem whose config to load.
    :return: The configuration for the users selected problem
    :rtype: dict
    """
    problem_directory = get_problem_directory(app_config)

    problem_config_path = os.path.join(
        problem_directory, '%s.yaml' % problem_id)

    # safe_load avoids arbitrary object construction (yaml.load without an
    # explicit Loader is unsafe and deprecated in PyYAML >= 5), and the
    # with-block closes the file handle that was previously leaked.
    with open(problem_config_path) as config_file:
        return yaml.safe_load(config_file)
def does_problem_config_exist(app_config, problem_id):
    """Checks to see if the problem configuration exists in the system.

    :return: True if it exists, false otherwise
    :rtype: bool
    """
    config_path = os.path.join(
        get_problem_directory(app_config), '%s.yaml' % problem_id)
    return os.path.exists(config_path)
| 29.288462 | 75 | 0.730138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 582 | 0.382141 |
89a03ad3a212cac804b72e4c7b59a7674d2cca45 | 2,729 | py | Python | reshade/pooling/l2norm.py | jamesjiang52/Reshade | ddc87424c50030b4606c4eb5ec61b4be1d4cad98 | [
"MIT"
] | null | null | null | reshade/pooling/l2norm.py | jamesjiang52/Reshade | ddc87424c50030b4606c4eb5ec61b4be1d4cad98 | [
"MIT"
] | null | null | null | reshade/pooling/l2norm.py | jamesjiang52/Reshade | ddc87424c50030b4606c4eb5ec61b4be1d4cad98 | [
"MIT"
] | null | null | null | """
The following classes are defined:
L2NormPoolingNeuron
L2NormPoolingLayer
"""
from math import sqrt
from ..utils.validate import *
from ..utils.flatten import *
class L2NormPoolingNeuron:
    """
    An l2norm pooling neuron. The output carries the square root of the sum
    of the squares of the values in the input image, and is refreshed
    whenever any input connection changes.

    Args:
        inputs: An object of type Image. The input image.
        output: An object of type Connection. The output.
    """
    def __init__(self, inputs, output):
        validate_dimensions_image(inputs)

        self._inputs = flatten_image(inputs)
        self._output = output

        # Recompute on every input change, and once now so the output starts
        # out consistent with the current input values.
        for connection in self._inputs:
            connection.bind_to(self._update_inputs)
        self._update_inputs()

    def _update_inputs(self):
        squared_total = 0
        for connection in self._inputs:
            squared_total += connection.value**2
        self._output.value = sqrt(squared_total)
class L2NormPoolingLayer:
    """
    Construct a new l2norm pooling layer. Each neuron in the layer performs
    l2norm pooling on its receptive field in the input layer for the
    corresponding output in the output layer.

    Args:
        inputs: An object of type ConnectionLayer. The input layer.
        outputs: An object of type ConnectionLayer. The output layer.
        receptive_height: A positive integer. The height of the receptive
            field.
        receptive_width: A positive integer. The width of the receptive field.
        stride_height: A positive integer. The stride height across adjacent
            receptive fields.
        stride_width: A positive integer. The stride width across adjacent
            receptive fields.
    """
    def __init__(
        self,
        inputs,
        outputs,
        receptive_height,
        receptive_width,
        stride_height,
        stride_width
    ):
        validate_dimensions_layer(inputs)
        validate_dimensions_layer(outputs)
        validate_receptive_parameters_layer(
            inputs,
            outputs,
            receptive_height,
            receptive_width,
            stride_height,
            stride_width
        )

        self._inputs = inputs
        self._outputs = outputs
        # One neuron per output connection: for each depth d and each window
        # anchor (y, x) stepped by the strides, slice the
        # receptive_height x receptive_width window out of the input and wire
        # it to output cell (y // stride_height, x // stride_width).
        self._neurons = [[[
            L2NormPoolingNeuron(
                [row[x:x + receptive_width]
                 for row in self._inputs[d][y:y + receptive_height]],
                self._outputs[d][y//stride_height][x//stride_width]
            )
            for x in range(0, len(self._inputs[d][y]) - receptive_width + 1,
                stride_width)]
            for y in range(0, len(self._inputs[d]) - receptive_height + 1,
                stride_height)]
            for d in range(len(inputs))]
| 31.011364 | 78 | 0.614877 | 2,549 | 0.934042 | 0 | 0 | 0 | 0 | 0 | 0 | 1,105 | 0.40491 |
89a059a7191529c20325397e8137136f297d1f83 | 57 | py | Python | sisy/__main__.py | qorrect/sisy | 4c279f3a47109395d57521b5c8144b18693737fc | [
"Apache-2.0"
] | 6 | 2017-09-15T03:14:10.000Z | 2019-12-03T04:15:21.000Z | sisy/__main__.py | qorrect/sisy | 4c279f3a47109395d57521b5c8144b18693737fc | [
"Apache-2.0"
] | 2 | 2017-09-21T01:49:42.000Z | 2017-09-23T16:33:01.000Z | sisy/__main__.py | qorrect/sisy | 4c279f3a47109395d57521b5c8144b18693737fc | [
"Apache-2.0"
] | null | null | null | from sisy import ui
if __name__ == "__main__":
ui()
| 11.4 | 26 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.175439 |
89a0e886b154e2d90823978c34e848f5d1927982 | 18,885 | py | Python | scripts/ur5_open_loop.py | lar-deeufba/real_time_grasp | d5ba0253bc42103b3dc5135d345c3a650f0bd64f | [
"BSD-3-Clause"
] | 27 | 2020-05-26T23:48:12.000Z | 2022-01-21T09:33:11.000Z | scripts/ur5_open_loop.py | lar-deeufba/real-time-grasp | d5ba0253bc42103b3dc5135d345c3a650f0bd64f | [
"BSD-3-Clause"
] | null | null | null | scripts/ur5_open_loop.py | lar-deeufba/real-time-grasp | d5ba0253bc42103b3dc5135d345c3a650f0bd64f | [
"BSD-3-Clause"
] | 8 | 2020-05-20T03:29:20.000Z | 2021-12-21T13:40:10.000Z | #!/usr/bin/python
import rospy
import actionlib
import numpy as np
import argparse
import copy
from copy import deepcopy
import rosservice
import sys
import re
from std_msgs.msg import Float64MultiArray, Float32MultiArray
from control_msgs.msg import FollowJointTrajectoryAction, FollowJointTrajectoryGoal, JointTolerance
from sensor_msgs.msg import JointState
from trajectory_msgs.msg import JointTrajectory, JointTrajectoryPoint
from geometry_msgs.msg import Pose
from controller_manager_msgs.srv import SwitchController
# Gazebo
from gazebo_msgs.msg import ModelStates, ContactsState, ContactState, LinkState
from gazebo_msgs.srv import GetLinkState, DeleteModel
from tf import TransformListener, TransformBroadcaster
from tf.transformations import euler_from_quaternion, quaternion_from_euler
# Inverse kinematics
from trac_ik_python.trac_ik import IK
from robotiq_2f_gripper_control.msg import _Robotiq2FGripper_robot_output as outputMsg
# Velocity command used when closing the gripper.
CLOSE_GRIPPER_VEL = 0.05
MAX_GRIPPER_CLOSE_INIT = 0.25 # Maximum angle that the gripper should be started using velocity command
PICKING = False # Tells the node that the object must follow the gripper (see vel_control.object_picking)
def parse_args(argv=None):
    """Parse the node's command-line options.

    Args:
        argv: Optional list of argument strings. Defaults to ``sys.argv[1:]``
            when None (the previous behavior); passing a list makes the
            parser usable from tests without touching the real command line.

    Returns:
        argparse.Namespace with a boolean ``gazebo`` attribute.
    """
    parser = argparse.ArgumentParser(description='AAPF_Orientation')
    # store_true: simulated-environment parameters are enabled by presence of the flag.
    parser.add_argument('--gazebo', action='store_true',
                        help='Set the parameters related to the simulated environment in Gazebo')
    args = parser.parse_args(argv)
    return args
class vel_control(object):
    def __init__(self, args, joint_values = None):
        """Set up the ROS node, action clients, publishers and subscribers.

        Arguments:
            args {Namespace} -- parsed command-line options (uses args.gazebo)
            joint_values {list} -- optional home joint configuration
        """
        rospy.init_node('command_GGCNN_ur5')
        self.args = args
        self.joint_values_home = joint_values
        self.tf = TransformListener()

        # Used to change the controller
        self.controller_switch = rospy.ServiceProxy('/controller_manager/switch_controller', SwitchController)

        # actionClient used to send joint positions
        self.client = actionlib.SimpleActionClient('pos_based_pos_traj_controller/follow_joint_trajectory', FollowJointTrajectoryAction)
        print "Waiting for server (pos_based_pos_traj_controller)..."
        self.client.wait_for_server()
        print "Connected to server (pos_based_pos_traj_controller)"

        # Gazebo topics (only wired up when running in simulation)
        if self.args.gazebo:
            # For picking: move/query/delete the simulated object
            self.pub_model_position = rospy.Publisher('/gazebo/set_link_state', LinkState, queue_size=1)
            self.get_model_coordinates = rospy.ServiceProxy('/gazebo/get_link_state', GetLinkState)
            self.delete_model_service = rospy.ServiceProxy('/gazebo/delete_model', DeleteModel)
            rospy.Subscriber('gazebo/model_states', ModelStates, self.get_model_state_callback, queue_size=1)

        # Subscriber used to read joint values
        rospy.Subscriber('/joint_states', JointState, self.ur5_actual_position_callback, queue_size=1)
        rospy.sleep(1.0)

        # USED FOR COLLISION DETECTION: gripper fingertip link names matched
        # against Gazebo contact reports in the bumper callbacks.
        self.finger_links = ['robotiq_85_right_finger_tip_link', 'robotiq_85_left_finger_tip_link']

        # LEFT GRIPPER contact state
        self.string = ""
        rospy.Subscriber('/left_finger_bumper_vals', ContactsState, self.monitor_contacts_left_finger_callback) # ContactState
        self.left_collision = False
        self.contactState_left = ContactState()

        # RIGHT GRIPPER contact state
        rospy.Subscriber('/right_finger_bumper_vals', ContactsState, self.monitor_contacts_right_finger_callback) # ContactState
        self.right_collision = False
        self.contactState_right = ContactState()

        self.client_gripper = actionlib.SimpleActionClient('gripper_controller_pos/follow_joint_trajectory', FollowJointTrajectoryAction)
        print "Waiting for server (gripper_controller_pos)..."
        self.client_gripper.wait_for_server()
        print "Connected to server (gripper_controller_pos)"

        # GGCNN grasp state (filled in by ggcnn_command_callback)
        self.posCB = []
        self.ori = []
        self.grasp_cartesian_pose = []
        self.gripper_angle_grasp = 0.0
        self.final_orientation = 0.0

        # Hand-tuned Cartesian offsets between the detected grasp point and
        # the commanded pose; different for simulation and the real arm.
        if self.args.gazebo:
            self.offset_x = 0.0
            self.offset_y = 0.0
            self.offset_z = 0.020 #0.019
        else:
            self.offset_x = -0.03 # 0.002
            self.offset_y = 0.02 # -0.05
            self.offset_z = 0.058 # 0.013

        self.ur5_joint_names = rospy.get_param("/ur5_joint_names")
        self.robotiq_joint_name = rospy.get_param("/robotiq_joint_name")

        # Topic published from GG-CNN Node
        rospy.Subscriber('ggcnn/out/command', Float32MultiArray, self.ggcnn_command_callback, queue_size=1)

        # Robotiq control
        self.pub_gripper_command = rospy.Publisher('Robotiq2FGripperRobotOutput', outputMsg.Robotiq2FGripper_robot_output, queue_size=1)

        self.d = None # msg received from GGCN
        self.gripper_max_width = 0.14
    def turn_velocity_controller_on(self):
        """Switch the arm to the joint-group velocity controller."""
        self.controller_switch(['joint_group_vel_controller'], ['pos_based_pos_traj_controller'], 1)
    def turn_position_controller_on(self):
        """Switch the arm to the position-based trajectory controller."""
        self.controller_switch(['pos_based_pos_traj_controller'], ['joint_group_vel_controller'], 1)
    def turn_gripper_velocity_controller_on(self):
        """Switch the gripper to its velocity controller."""
        self.controller_switch(['gripper_controller_vel'], ['gripper_controller_pos'], 1)
    def turn_gripper_position_controller_on(self):
        """Switch the gripper to its position controller."""
        self.controller_switch(['gripper_controller_pos'], ['gripper_controller_vel'], 1)
    def monitor_contacts_left_finger_callback(self, msg):
        """Gazebo bumper callback for the LEFT fingertip.

        Sets self.left_collision and stores the name of the *other* body in
        self.string: collision names look like 'model::link::collision', so
        the middle segment is extracted; if collision1 is one of our own
        fingertip links, the contacted object is collision2 instead.
        """
        if msg.states:
            self.left_collision = True
            string = msg.states[0].collision1_name
            string_collision = re.findall(r'::(.+?)::',string)[0]
            if string_collision in self.finger_links:
                # collision1 is our finger; the object is collision2.
                string = msg.states[0].collision2_name
                self.string = re.findall(r'::(.+?)::', string)[0]
            else:
                self.string = string_collision
        else:
            self.left_collision = False
    def monitor_contacts_right_finger_callback(self, msg):
        """Gazebo bumper callback for the RIGHT fingertip.

        Mirror of monitor_contacts_left_finger_callback: sets
        self.right_collision and records the contacted body's link name
        (the middle '::'-delimited segment) in self.string.
        """
        if msg.states:
            self.right_collision = True
            string = msg.states[0].collision1_name
            string_collision = re.findall(r'::(.+?)::',string)[0]
            if string_collision in self.finger_links:
                # collision1 is our finger; the object is collision2.
                string = msg.states[0].collision2_name
                self.string = re.findall(r'::(.+?)::',string)[0]
            else:
                self.string = string_collision
        else:
            self.right_collision = False
    def delete_model_service_method(self):
        """
        Delete a model in Gazebo.

        The model name is derived from the last contacted link name stored
        in self.string by stripping the '_link' suffix (assumes models are
        named '<model>' with a link '<model>_link' — TODO confirm).
        """
        string = self.string
        model = string.replace("_link", "")
        self.delete_model_service(model)
    def ur5_actual_position_callback(self, joint_values_from_ur5):
        """Get UR5 joint angles.

        The joint states published by /joint_states of the UR5 robot are in
        the wrong order. /joint_states normally publishes the joints as:
        [elbow_joint, shoulder_lift_joint, shoulder_pan_joint, wrist_1_joint, wrist_2_joint, wrist_3_joint]
        but the correct order of the joints that must be sent to the robot is:
        ['shoulder_pan_joint', 'shoulder_lift_joint', 'elbow_joint', 'wrist_1_joint', 'wrist_2_joint', 'wrist_3_joint']
        In simulation the message carries an extra gripper joint value.

        Arguments:
            joint_values_from_ur5 {JointState} -- Actual angles of the UR5 Robot
        """
        if self.args.gazebo:
            self.th3, self.robotic, self.th2, self.th1, self.th4, self.th5, self.th6 = joint_values_from_ur5.position
        else:
            self.th3, self.th2, self.th1, self.th4, self.th5, self.th6 = joint_values_from_ur5.position
        # Reordered into the convention expected by the controllers.
        self.actual_position = [self.th1, self.th2, self.th3, self.th4, self.th5, self.th6]
    def get_model_state_callback(self, msg):
        """On every Gazebo model-state update, re-attach the picked object
        to the gripper (no-op unless the global PICKING flag is set)."""
        self.object_picking()
    def ggcnn_command_callback(self, msg):
        """
        GGCNN Command Subscriber Callback.

        Transforms the detected grasp ('object_detected' frame) into
        base_link coordinates, applies the hand-tuned Cartesian offsets,
        stores the pose (self.posCB) and grasp angle (self.ori), and
        re-broadcasts the result as the 'object_link' TF frame.
        msg.data layout: index 3 is the grasp angle, index -2 the gripper
        opening (see traj_planner usage).
        """
        self.tf.waitForTransform("base_link", "object_detected", rospy.Time.now(), rospy.Duration(4.0))
        object_pose, object_ori = self.tf.lookupTransform("base_link", "object_detected", rospy.Time(0))
        self.d = list(msg.data)
        object_pose[0] += self.offset_x
        object_pose[1] += self.offset_y
        object_pose[2] += self.offset_z
        self.posCB = object_pose
        self.ori = self.d[3]
        br = TransformBroadcaster()
        br.sendTransform((object_pose[0],
                          object_pose[1],
                          object_pose[2]),
                          quaternion_from_euler(0.0, 0.0, self.ori),
                          rospy.Time.now(),
                          "object_link",
                          "base_link")
def get_link_position_picking(self):
link_name = self.string
model_coordinates = self.get_model_coordinates(self.string, 'wrist_3_link')
self.model_pose_picking = model_coordinates.link_state.pose
    def reset_link_position_picking(self):
        """Forget the last contacted link name (clears self.string)."""
        self.string = ""
def object_picking(self):
global PICKING
if PICKING:
angle = quaternion_from_euler(1.57, 0.0, 0.0)
object_picking = LinkState()
object_picking.link_name = self.string
object_picking.pose = Pose(self.model_pose_picking.position, self.model_pose_picking.orientation)
object_picking.reference_frame = "wrist_3_link"
self.pub_model_position.publish(object_picking)
    def get_ik(self, pose):
        """Get the inverse kinematics.

        Get the inverse kinematics of the UR5 robot using the trac_ik
        package for a desired Cartesian position, with the tool pointing
        straight down (fixed pitch of -3.14 rad about Y).

        Arguments:
            pose {list} -- A pose representing x, y and z

        Returns:
            sol {list} -- Joint angles or None if trac_ik is not able to find a valid solution
        """
        # Offset reserved for a camera-support tilt; currently unused (0.0).
        camera_support_angle_offset = 0.0

        q = quaternion_from_euler(0.0, -3.14 + camera_support_angle_offset, 0.0)

        # Joint order:
        # ('shoulder_link', 'upper_arm_link', 'forearm_link', 'wrist_1_link', 'wrist_2_link', 'wrist_3_link', 'grasping_link')
        ik_solver = IK("base_link", "grasping_link", solve_type="Distance")
        # The first argument is the seed configuration the solver starts from.
        sol = ik_solver.get_ik([0.2201039360819781, -1.573845095552878, -1.521853400505349, -1.6151347051274518, 1.5704492904506875, 0.0],
                               pose[0], pose[1], pose[2], q[0], q[1], q[2], q[3])

        if sol is not None:
            sol = list(sol)
            # Wrist-3 rotation is decided later by the grasp angle, not IK.
            sol[-1] = 0.0

        return sol
    def __build_goal_message_ur5(self):
        """Build an empty FollowJointTrajectoryGoal for the arm.

        The goal carries the UR5 joint names, a per-joint tolerance and a
        5 s goal-time tolerance; trajectory points are appended by the
        caller (traj_planner).
        """
        goal = FollowJointTrajectoryGoal()
        goal.trajectory = JointTrajectory()
        goal.trajectory.joint_names = self.ur5_joint_names
        goal.goal_tolerance.append(JointTolerance('joint_tolerance', 0.1, 0.1, 0))
        goal.goal_time_tolerance = rospy.Duration(5,0)
        return goal
def traj_planner(self, cart_pos, grasp_step='move', way_points_number=10, movement='slow'):
    """Quintic Trajectory Planner

    Publish a trajectory to UR5 using quintic splines.

    Arguments:
        cart_pos {[float]} -- Grasp position [x, y, z] (ignored unless grasp_step == 'move')

    Keyword Arguments:
        grasp_step {str} -- Set UR5 movement type: 'pregrasp', 'grasp' or 'move' (default: {'move'})
        way_points_number {number} -- Number of points considered in trajectory (default: {10})
        movement {str} -- Movement speed, 'slow' or 'fast' (default: {'slow'})

    NOTE(review): any other value for grasp_step or movement leaves
    joint_pos / final_traj_duration unbound and raises NameError — confirm
    whether invalid arguments should be rejected explicitly.
    """
    if grasp_step == 'pregrasp':
        # Hover 10 cm above the detected object; remember the gripper yaw.
        self.grasp_cartesian_pose = deepcopy(self.posCB)
        self.grasp_cartesian_pose[-1] += 0.1
        joint_pos = self.get_ik(self.grasp_cartesian_pose)
        joint_pos[-1] = self.ori
        self.final_orientation = deepcopy(self.ori)
        self.gripper_angle_grasp = deepcopy(self.d[-2])
    elif grasp_step == 'grasp':
        # Descend the same 10 cm back onto the object, keeping the yaw
        # captured during the pregrasp step.
        self.grasp_cartesian_pose[-1] -= 0.1
        joint_pos = self.get_ik(self.grasp_cartesian_pose)
        joint_pos[-1] = self.final_orientation
    elif grasp_step == 'move':
        joint_pos = self.get_ik(cart_pos)
        joint_pos[-1] = 0.0
    if movement=='slow':
        final_traj_duration = 500.0 # total iteractions
    elif movement=='fast':
        final_traj_duration = 350.0
    # Quintic boundary conditions: zero velocity/acceleration at both ends.
    v0 = a0 = vf = af = 0
    t0 = 5.0
    tf = (t0 + final_traj_duration) / way_points_number # tf by way point
    t = tf / 10 # for each movement
    ta = tf / 10 # to complete each movement
    a = [0.0]*6
    pos_points, vel_points, acc_points = [0.0]*6, [0.0]*6, [0.0]*6
    goal = self.__build_goal_message_ur5()
    # Solve the 6x6 quintic-spline coefficient system per joint.
    for i in range(6):
        q0 = self.actual_position[i]
        qf = joint_pos[i]
        b = np.array([q0,v0,a0,qf,vf,af]).transpose()
        m = np.array([[1, t0, t0**2, t0**3, t0**4, t0**5],
                      [0, 1, 2*t0, 3*t0**2, 4*t0**3, 5*t0**4],
                      [0, 0, 2, 6*t0, 12*t0**2, 20*t0**3],
                      [1, tf, tf**2, tf**3, tf**4, tf**5],
                      [0, 1, 2*tf, 3*tf**2, 4*tf**3, 5*tf**4],
                      [0, 0, 2, 6*tf, 12*tf**2, 20*tf**3]])
        a[i] = np.linalg.inv(m).dot(b)
    # Sample the spline at each way point and append it to the goal.
    # NOTE(review): pos_points/vel_points/acc_points are the same list
    # objects on every iteration; if JointTrajectoryPoint stores them by
    # reference instead of copying, all points end up identical — verify.
    for i in range(way_points_number):
        for j in range(6):
            pos_points[j] = a[j][0] + a[j][1]*t + a[j][2]*t**2 + a[j][3]*t**3 + a[j][4]*t**4 + a[j][5]*t**5
            vel_points[j] = a[j][1] + 2*a[j][2]*t + 3*a[j][3]*t**2 + 4*a[j][4]*t**3 + 5*a[j][5]*t**4
            acc_points[j] = 2*a[j][2] + 6*a[j][3]*t + 12*a[j][4]*t**2 + 20*a[j][5]*t**3
        goal.trajectory.points.append(JointTrajectoryPoint(positions=pos_points,
                                                           velocities=vel_points,
                                                           accelerations=acc_points,
                                                           time_from_start=rospy.Duration(t))) #default 0.1*i + 5
        t += ta
    self.client.send_goal(goal)
    self.all_close(joint_pos)
def all_close(self, goal, tolerance=0.00005):
    """Wait until goal is reached in configuration space

    This method checks if the robot reached the goal position, since
    wait_for_result seems to be broken. It polls the squared joint-space
    error between the measured joints and the goal.

    Arguments:
        goal {[list]} -- Goal in configuration space (joint values)

    Keyword Arguments:
        tolerance {number} -- Maximum squared error allowed to consider the trajectory completed (default: {0.00005})
    """
    rospy.loginfo("Waiting for trajectory.")
    error = np.sum([(self.actual_position[i] - goal[i])**2 for i in range(6)])
    while not rospy.is_shutdown() and error > tolerance:
        # Sleep briefly so this polling loop does not spin a CPU core.
        rospy.sleep(0.01)
        error = np.sum([(self.actual_position[i] - goal[i])**2 for i in range(6)])
    # Use <= so that landing exactly on the tolerance still counts as
    # success (the loop above exits when error <= tolerance).
    if error <= tolerance:
        rospy.loginfo("Trajectory succeeded.")  # within the tolerance specified
    else:
        # Reached only on rospy shutdown before convergence.
        rospy.logerr("Trajectory aborted.")
def genCommand(self, char, command, pos=None):
    """
    Update the command according to the character entered by the user.

    Characters: 'a' activate, 'r' reset, 'c' close fully, 'p' go to the
    pre-grasp opening, 'o' open fully. The mutated command is returned.

    NOTE(review): the `pos` parameter is accepted but never used — confirm
    whether 'p' was meant to use it instead of self.ori.
    """
    if char == 'a':
        # command = outputMsg.Robotiq2FGripper_robot_output();
        command.rACT = 1 # Gripper activation
        command.rGTO = 1 # Go to position request
        command.rSP = 255 # Speed
        command.rFR = 150 # Force
    if char == 'r':
        command.rACT = 0
    if char == 'c':
        command.rACT = 1
        command.rGTO = 1
        command.rATR = 0
        command.rPR = 255
        command.rSP = 40
        command.rFR = 150
    # @param pos Gripper width in meters. [0, 0.087]
    if char == 'p':
        command.rACT = 1
        command.rGTO = 1
        command.rATR = 0
        # Map an opening width to the 0-255 register range.
        # NOTE(review): this uses self.ori (object yaw) in the width
        # formula — looks like it should be a gripper width; verify.
        command.rPR = int(np.clip((13.-230.)/self.gripper_max_width * self.ori + 230., 0, 255))
        command.rSP = 40
        command.rFR = 150
    if char == 'o':
        command.rACT = 1
        command.rGTO = 1
        command.rATR = 0
        command.rPR = 0
        command.rSP = 40
        command.rFR = 150
    return command
def command_gripper(self, action):
    """Build a Robotiq 2F gripper output message for *action* and publish it."""
    message = self.genCommand(action, outputMsg.Robotiq2FGripper_robot_output())
    self.pub_gripper_command.publish(message)
def gripper_send_position_goal(self, position=0.3, velocity=0.4, action='move'):
    """Send position goal to the gripper

    Keyword Arguments:
        position {float} -- Gripper angle (default: {0.3})
        velocity {float} -- Gripper velocity profile (default: {0.4})
        action {str} -- Gripper movement: 'move', 'pre_grasp_angle' or 'pick' (default: {'move'})
    """
    self.turn_gripper_position_controller_on()
    duration = 0.2
    if action == 'pre_grasp_angle':
        # Convert the detected grasp width into a finger angle.
        # K is an empirical safety factor to open slightly wider.
        max_distance = 0.085
        angular_coeff = 0.11
        K = 1.3
        angle = (max_distance - self.gripper_angle_grasp) / angular_coeff * K
        position = angle
        velocity = 0.4
    elif action == 'pick':
        # Close slowly until a finger collision is detected below.
        position = 0.7
        velocity = 0.05
        duration = 8.0
    goal = FollowJointTrajectoryGoal()
    goal.trajectory = JointTrajectory()
    goal.trajectory.joint_names = self.robotiq_joint_name
    goal.trajectory.points.append(JointTrajectoryPoint(positions=[position],
                                                       velocities=[velocity],
                                                       accelerations=[0.0],
                                                       time_from_start=rospy.Duration(duration)))
    self.client_gripper.send_goal(goal)
    if action == 'pick':
        # NOTE(review): busy-wait with no sleep until either finger reports
        # contact; consider rospy.sleep in the loop — verify CPU impact.
        while not rospy.is_shutdown() and not self.left_collision and not self.right_collision:
            pass
        # Stop closing as soon as contact is made.
        self.client_gripper.cancel_goal()
def move_home_on_shutdown(self):
    """rospy shutdown hook: abort any arm trajectory goal still in flight."""
    rospy.loginfo("Shutting down node...")
    self.client.cancel_goal()
    # Gripper goal cancellation intentionally left disabled:
    # self.client_gripper.cancel_goal()
def main():
    """Interactive pick-and-place demo loop for the UR5 + Robotiq gripper.

    Homes the robot, takes a depth-camera shot position, then repeatedly:
    pregrasp -> grasp -> pick -> carry to bin -> place -> return home.
    Runs on Python 2 (raw_input) and requires a live ROS/Gazebo setup.
    """
    global PICKING
    arg = parse_args()
    ur5_vel = vel_control(arg)
    point_init_home = [-0.37, 0.11, 0.15]
    joint_values_home = ur5_vel.get_ik(point_init_home)
    ur5_vel.joint_values_home = joint_values_home
    # Send the robot to the custom HOME position
    raw_input("==== Press enter to 'home' the robot!")
    rospy.on_shutdown(ur5_vel.move_home_on_shutdown)
    ur5_vel.traj_planner(point_init_home, movement='fast')
    # Remove all objects from the scene and press enter
    raw_input("==== Press enter to move the robot to the 'depth cam shot' position!")
    point_init = [-0.37, 0.11, 0.05]
    ur5_vel.traj_planner(point_init, movement='fast')
    # Initialize the gripper (simulated or real hardware).
    if arg.gazebo:
        rospy.loginfo("Starting the gripper in Gazebo! Please wait...")
        ur5_vel.gripper_send_position_goal(0.4)
    else:
        rospy.loginfo("Starting the real gripper! Please wait...")
        ur5_vel.command_gripper('r')
        rospy.sleep(0.5)
        ur5_vel.command_gripper('a')
        ur5_vel.command_gripper('o')
    while not rospy.is_shutdown():
        raw_input("==== Press enter to move to the pre grasp position!")
        ur5_vel.traj_planner(point_init, 'pregrasp', movement='fast')
        # It closes the gripper before approaching the object
        # It prevents the gripper to collide with other objects when grasping
        raw_input("==== Press enter start the grasping process!")
        if arg.gazebo:
            ur5_vel.gripper_send_position_goal(action='pre_grasp_angle')
        else:
            ur5_vel.command_gripper('p')
        # Generate the trajectory to the grasp position
        # BE CAREFUL!
        ur5_vel.traj_planner([], 'grasp', movement='slow')
        rospy.loginfo("Picking object")
        if arg.gazebo:
            ur5_vel.gripper_send_position_goal(action='pick')
            ur5_vel.get_link_position_picking()
        else:
            raw_input("==== Press enter to close the gripper!")
            ur5_vel.command_gripper('c')
        rospy.loginfo("Moving object to the bin")
        # After a collision is detected, the arm will start the picking action
        PICKING = True # Attach object
        ur5_vel.traj_planner([-0.45, 0.0, 0.15], movement='fast')
        ur5_vel.traj_planner([-0.45, -0.16, 0.15], movement='fast')
        ur5_vel.traj_planner([-0.45, -0.16, 0.08], movement='slow') # Be careful when approaching the bin
        rospy.loginfo("Placing object")
        # After the bin location is reached, the robot will place the object and move back
        # to the initial position
        PICKING = False # Detach object
        if arg.gazebo:
            ur5_vel.gripper_send_position_goal(0.3)
            ur5_vel.delete_model_service_method()
            ur5_vel.reset_link_position_picking()
        else:
            ur5_vel.command_gripper('o')
        rospy.loginfo("Moving back to home position")
        ur5_vel.traj_planner([-0.45, -0.16, 0.15], movement='fast')
        ur5_vel.traj_planner(point_init_home, movement='fast')
        ur5_vel.traj_planner(point_init, movement='slow')
# Entry point. Python 2 print statement below: this script targets ROS
# distributions that run on Python 2.
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        print "Program interrupted before completion"
| 35.365169 | 133 | 0.714112 | 14,602 | 0.773206 | 0 | 0 | 0 | 0 | 0 | 0 | 5,921 | 0.313529 |
89a1c74f6ff7daea200b8e6b6423ed86d7bc83ec | 2,936 | py | Python | certbot_dns_corenetworks/test/unit/test_dns_corenetwork.py | xoxys/certbot-dns-ispconfig | 8d5c0a9df72342ce55db6469d10cdd69ce3bb024 | [
"MIT"
] | 1 | 2021-03-19T00:29:39.000Z | 2021-03-19T00:29:39.000Z | certbot_dns_corenetworks/test/unit/test_dns_corenetwork.py | xoxys/certbot-dns-ispconfig | 8d5c0a9df72342ce55db6469d10cdd69ce3bb024 | [
"MIT"
] | 6 | 2020-11-11T21:32:23.000Z | 2021-03-20T16:00:17.000Z | certbot_dns_corenetworks/test/unit/test_dns_corenetwork.py | thegeeklab/certbot-dns-corenetworks | 8d5c0a9df72342ce55db6469d10cdd69ce3bb024 | [
"MIT"
] | null | null | null | """Tests for certbot_dns_corenetworks.dns_corenetworks."""
import unittest
import mock
from certbot import errors
from certbot.compat import os
from certbot.plugins import dns_test_common
from certbot.plugins.dns_test_common import DOMAIN
from certbot.tests import util as test_util
# Dummy credentials used by the authenticator tests below.
API_USER = "my_user"
API_PASSWORD = "secure"
class AuthenticatorTest(test_util.TempDirTestCase, dns_test_common.BaseAuthenticatorTest):
    """Tests for the Core Networks DNS Authenticator."""

    def setUp(self):
        from certbot_dns_corenetworks.dns_corenetworks import Authenticator

        super(AuthenticatorTest, self).setUp()

        # Write a credentials file into the test's temporary directory.
        path = os.path.join(self.tempdir, "file.ini")
        dns_test_common.write({
            "corenetworks_username": API_USER,
            "corenetworks_password": API_PASSWORD
        }, path)

        self.config = mock.MagicMock(
            corenetworks_credentials=path, corenetworks_propagation_seconds=0
        )  # don't wait during tests

        self.auth = Authenticator(self.config, "corenetworks")

        # Patch the client factory so no real API calls are made.
        self.mock_client = mock.MagicMock()
        self.auth._get_corenetworks_client = mock.MagicMock(return_value=self.mock_client)

    def test_perform(self):
        # perform() must create exactly one ACME challenge TXT record.
        self.auth.perform([self.achall])

        expected = [
            mock.call.add_txt_record(DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY, mock.ANY)
        ]
        self.assertEqual(expected, self.mock_client.mock_calls)

    def test_cleanup(self):
        # Simulate a previous perform() so cleanup has a record to remove.
        self.auth.nameCache["_acme-challenge." + DOMAIN] = "_acme-challenge." + DOMAIN
        self.auth._attempt_cleanup = True
        self.auth.cleanup([self.achall])

        expected = [mock.call.del_txt_record(DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY)]
        self.assertEqual(expected, self.mock_client.mock_calls)

    def test_creds(self):
        # Valid user + password: perform() succeeds.
        dns_test_common.write({
            "corenetworks_username": API_USER,
            "corenetworks_password": API_PASSWORD
        }, self.config.corenetworks_credentials)
        self.auth.perform([self.achall])
        expected = [
            mock.call.add_txt_record(DOMAIN, "_acme-challenge." + DOMAIN, mock.ANY, mock.ANY)
        ]
        self.assertEqual(expected, self.mock_client.mock_calls)

    def test_no_creds(self):
        # An empty credentials file must be rejected.
        dns_test_common.write({}, self.config.corenetworks_credentials)
        self.assertRaises(errors.PluginError, self.auth.perform, [self.achall])

    def test_missing_user_or_password(self):
        # Username without password is rejected...
        dns_test_common.write({"corenetworks_username": API_USER},
                              self.config.corenetworks_credentials)
        self.assertRaises(errors.PluginError, self.auth.perform, [self.achall])
        # ...and password without username likewise.
        dns_test_common.write({"corenetworks_password": API_PASSWORD},
                              self.config.corenetworks_credentials)
        self.assertRaises(errors.PluginError, self.auth.perform, [self.achall])
if __name__ == "__main__":
unittest.main()
| 35.373494 | 93 | 0.68733 | 2,553 | 0.86955 | 0 | 0 | 0 | 0 | 0 | 0 | 403 | 0.137262 |
89a2045cc532ad4eb8b579a9b93af35ce91d0c11 | 5,068 | py | Python | examples/monitor.py | zhuxuanxuan23/ksc-sdk-python | 7159953c36cab0ca2eeb27fbc337f96cce0771cd | [
"Apache-2.0"
] | 53 | 2016-09-21T15:52:14.000Z | 2021-12-23T09:23:00.000Z | examples/monitor.py | zhuxuanxuan23/ksc-sdk-python | 7159953c36cab0ca2eeb27fbc337f96cce0771cd | [
"Apache-2.0"
] | 27 | 2016-09-21T15:24:43.000Z | 2021-11-18T08:38:38.000Z | examples/monitor.py | zhuxuanxuan23/ksc-sdk-python | 7159953c36cab0ca2eeb27fbc337f96cce0771cd | [
"Apache-2.0"
] | 68 | 2016-09-06T10:33:09.000Z | 2021-11-16T07:13:03.000Z | # -*- encoding:utf-8 -*-
from kscore.session import get_session
import json
# Manual smoke-test script for the KSC monitor/monitorv2 API clients.
# Most calls are intentionally left commented out; uncomment one at a time.
if __name__ == "__main__":
    s = get_session()
    client = s.create_client("monitor", "cn-beijing-6", use_ssl=True)
    clientv2 = s.create_client("monitorv2", "cn-beijing-6", use_ssl=True)

    # (Chinese: "general product lines, excluding containers (docker)")
    '''
    通用产品线,不包含容器(docker)
    '''
    #ListMetrics
    m = client.list_metrics(InstanceID="293bbbc1-6c27-4567-89fc-xxxxx",
                            Namespace="kec",
                            PageIndex="1",
                            PageSize="10")
    print(json.dumps(m, sort_keys=True, indent=4))

    #GetMetricStatistics
    #m = client.get_metric_statistics(
    #    InstanceID="ef6eaa98-8e2b-4629-98e0-xxxxx",
    #    Namespace="eip",
    #    MetricName="eip.bps.in",
    #    StartTime="2021-09-15T10:09:00Z",
    #    EndTime="2021-09-15T10:19:00Z",
    #    Period="60",
    #    Aggregate="Average,Max,Min")
    #print(json.dumps(m, sort_keys=True, indent=4))

    #GetMetricStatisticsBatch version=2018-11-14
    param = {
        "Namespace":
            "kec",
        "StartTime":
            "2021-09-15T10:00:00Z",
        "EndTime":
            "2021-09-15T10:09:00Z",
        "Period":
            "180",
        "Aggregate": ["Max", "Min", "Avg"],
        "Metrics": [{
            "InstanceID": "293bbbc1-6c27-4567-89fc-xxxxx",
            "MetricName": "net.if.in"
        }, {
            "InstanceID": "293bbbc1-6c27-4567-89fc-xxxxx",
            "MetricName": "cpu.utilizition.total"
        }, {
            "InstanceID": "6a725f27-1c7e-4704-95c8-xxxxx",
            "MetricName": "net.if.out"
        }]
    }
    #m = client.get_metric_statistics_batch_v2(**param)
    #print(json.dumps(m, sort_keys=True, indent=4))

    # (Chinese: "only supports docker containers (kce); other product
    # lines are not supported")
    '''
    只支持容器docker(kce),其余产品线不支持。
    '''
    #ListMetrics
    paraml = {
        "Action": "ListMetrics",
        "Version": "2019-08-12",
        "Namespace": "kce",
        "PageIndex": "1",
        "PageSize": "10",
        "Dimensions.0.Name": "ClusterId",
        "Dimensions.0.Value": "807a4149-b7e2-4e05-8a35-xxxxx",
        "Dimensions.1.Name": "NamespaceName",
        "Dimensions.1.Value": "xxxxx",
        "Dimensions.2.Name": "WorkloadType",
        "Dimensions.2.Value": "deployment",
        "Dimensions.3.Name": "WorkloadName",
        "Dimensions.3.Value": "xxxxx",
        "Dimensions.4.Name": "PodName",
        "Dimensions.4.Value": "xxxxx-xxxxx-xxxxx",
        # "Dimensions.5.Name":"ContainerName",
        # "Dimensions.5.Value":"xxxxx"
    }
    #m = client.list_metrics_v3(**paraml)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #GetMetricStatistics
    paramg = {
        "Action": "GetMetricStatistics",
        "Version": "2019-08-12",
        "Namespace": "kce",
        "MetricName": "pod.network.rx",
        "StartTime": "2021-09-15T10:09:00Z",
        "EndTime": "2021-09-15T10:19:00Z",
        "Period": "60",
        "Aggregate": "Average,Max,Min",
        "Dimensions.0.Name": "ClusterId",
        "Dimensions.0.Value": "807a4149-b7e2-4e05-8a35-xxxxx",
        "Dimensions.1.Name": "NamespaceName",
        "Dimensions.1.Value": "xxxxx",
        "Dimensions.2.Name": "WorkloadType",
        "Dimensions.2.Value": "deployment",
        "Dimensions.3.Name": "WorkloadName",
        "Dimensions.3.Value": "xxxxx",
        "Dimensions.4.Name": "PodName",
        "Dimensions.4.Value": "xxxxx",
        # "Dimensions.5.Name":"ContainerName",
        # "Dimensions.5.Value":"xxxxx"
    }
    #m = client.get_metric_statistics_v3(**paramg)
    #print(json.dumps(m, sort_keys=True, indent=4))

    # Alarm-policy management examples (monitorv2 client).
    #ListAlarmPolicy
    #m = clientv2.list_alarm_policy(PageIndex=1, PageSize=10)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #DescribeAlarmPolicy
    #m = clientv2.describe_alarm_policy(PolicyId=25232)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #DescribePolicyObject
    #m = clientv2.describe_policy_object(PolicyId=25232, PageIndex=1, PageSize=10)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #DescribeAlarmReceives
    #m = clientv2.describe_alarm_receives(PolicyId=25232)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #AddAlarmReceives
    paraml = {
        "PolicyId": 25232,
        "ContactFlag": 2,
        "ContactWay": 3,
        "ContactId": [1985, 3607],
    }
    #m = clientv2.add_alarm_receives(**paraml)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #DeleteAlarmReceives
    paraml = {
        "PolicyId": 25232,
        "ContactFlag": 2,
        "ContactId": [1985, 3607],
    }
    #m = clientv2.delete_alarm_receives(**paraml)
    #print(json.dumps(m, sort_keys=True, indent=4))

    #GetUserGroup
    #m = clientv2.get_user_group()
    #print(json.dumps(m, sort_keys=True, indent=4))

    #GetAlertUser
    #m = clientv2.get_alert_user(UserGrpId=[879, 1484])
    #print(json.dumps(m, sort_keys=True, indent=4))

    #UpdateAlertUserStatus
    paraml = {
        "UserId": [1985, 3607],
        "UserStatus": 1,
    }
    #m = clientv2.update_alert_user_status(**paraml)
    #print(json.dumps(m, sort_keys=True, indent=4))
| 31.283951 | 82 | 0.583662 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,510 | 0.684477 |
89a2a4399e10100131e600830518389f72bbc22b | 787 | py | Python | malware_rl/envs/utils/sorel.py | xenoxine/malware_rl | 6a316caae02491404cb5d335735c22a74269e01f | [
"MIT"
] | 61 | 2020-08-28T19:33:07.000Z | 2022-03-26T06:38:29.000Z | malware_rl/envs/utils/sorel.py | xenoxine/malware_rl | 6a316caae02491404cb5d335735c22a74269e01f | [
"MIT"
] | 11 | 2020-09-25T18:59:41.000Z | 2022-01-31T11:39:57.000Z | malware_rl/envs/utils/sorel.py | xenoxine/malware_rl | 6a316caae02491404cb5d335735c22a74269e01f | [
"MIT"
] | 12 | 2020-08-29T01:35:05.000Z | 2022-02-07T02:56:35.000Z | import os
import sys
import lightgbm as lgb
import numpy as np
from malware_rl.envs.utils.ember import PEFeatureExtractor
# Resolve the bundled LightGBM model file that lives next to this module.
module_path = os.path.split(os.path.abspath(sys.modules[__name__].__file__))[0]
model_path = os.path.join(module_path, "sorel.model")
# os.path.join never raises ValueError for a missing file, so the original
# try/except ValueError could never fire; check for existence explicitly.
if not os.path.exists(model_path):
    print("The model path provided does not exist: %s" % model_path)
class SorelModel:
    """LightGBM classifier over EMBER-style PE features (SOREL model)."""

    def __init__(self):
        # Loads the pre-trained booster from the path resolved at import time.
        self.model = lgb.Booster(model_file=model_path)
        self.threshold = 0.8336  # Ember 1% FPR
        self.feature_version = 2
        self.extractor = PEFeatureExtractor(self.feature_version)

    def extract(self, bytez):
        """Return the EMBER feature vector (float32 ndarray) for raw PE bytes."""
        return np.array(self.extractor.feature_vector(bytez), dtype=np.float32)

    def predict_sample(self, features):
        """Return the model score for a single feature vector."""
        return self.model.predict([features])[0]
| 27.137931 | 79 | 0.717916 | 445 | 0.565438 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.083863 |
89a2ae50c2ab46fa15f409a4fc5b32e4fd3a7ad0 | 732 | py | Python | hummingbot/connector/exchange/bittrex/bittrex_utils.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 3,027 | 2019-04-04T18:52:17.000Z | 2022-03-30T09:38:34.000Z | hummingbot/connector/exchange/bittrex/bittrex_utils.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 4,080 | 2019-04-04T19:51:11.000Z | 2022-03-31T23:45:21.000Z | hummingbot/connector/exchange/bittrex/bittrex_utils.py | joedomino874/hummingbot | cb3ee5a30a2feb0a55ceca9d200c59662d7e3057 | [
"Apache-2.0"
] | 1,342 | 2019-04-04T20:50:53.000Z | 2022-03-31T15:22:36.000Z | from hummingbot.client.config.config_var import ConfigVar
from hummingbot.client.config.config_methods import using_exchange
# Bittrex is a centralized exchange.
CENTRALIZED = True

# Example trading pair shown in connector prompts/documentation.
EXAMPLE_PAIR = "ZRX-ETH"

# Default fee percentages; presumably [maker, taker] — confirm against
# the connector's fee lookup.
DEFAULT_FEES = [0.25, 0.25]

# Configuration variables prompted from the user when connecting Bittrex.
KEYS = {
    "bittrex_api_key":
        ConfigVar(key="bittrex_api_key",
                  prompt="Enter your Bittrex API key >>> ",
                  required_if=using_exchange("bittrex"),
                  is_secure=True,
                  is_connect_key=True),
    "bittrex_secret_key":
        ConfigVar(key="bittrex_secret_key",
                  prompt="Enter your Bittrex secret key >>> ",
                  required_if=using_exchange("bittrex"),
                  is_secure=True,
                  is_connect_key=True),
}
| 29.28 | 66 | 0.602459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 170 | 0.23224 |
89a5548ef4bf1d2363f4677b23a160ba481678ba | 1,142 | py | Python | actions/list_members.py | stackstorm-enhancements/stackstorm-github | e2ef6ece6b34e50755ec3174ebbe0a42a16f9941 | [
"Apache-2.0"
] | 1 | 2020-07-23T21:04:39.000Z | 2020-07-23T21:04:39.000Z | actions/list_members.py | ncskier/stackstorm-github | e2ef6ece6b34e50755ec3174ebbe0a42a16f9941 | [
"Apache-2.0"
] | 7 | 2020-07-24T00:09:58.000Z | 2020-08-11T11:47:39.000Z | actions/list_members.py | stackstorm-enhancements/stackstorm-github | e2ef6ece6b34e50755ec3174ebbe0a42a16f9941 | [
"Apache-2.0"
] | null | null | null | import datetime
from github.GithubException import UnknownObjectException
from lib.base import BaseGithubAction
from lib.formatters import user_to_dict
__all__ = [
'ListMembersAction'
]
class ListMembersAction(BaseGithubAction):
    """StackStorm action that lists members of a GitHub organization.

    Falls back to returning the single user when *user* is not an
    organization (the organization lookup raises UnknownObjectException).
    """

    def run(self, user, base_url, filter=None, role=None, limit=20):
        """Return up to *limit* member dicts for organization *user*.

        Arguments:
            user -- organization (or user) login name
            base_url -- optional GitHub Enterprise base URL
            filter -- optional GitHub member filter (e.g. '2fa_disabled')
            role -- optional member role filter (e.g. 'admin', 'member')
            limit -- maximum number of members to return (default 20)
        """
        kwargs = {}
        if filter:
            kwargs['filter'] = filter
        if role:
            kwargs['role'] = role

        result = []
        try:
            # `is None` instead of `== None` (PEP 8 identity comparison).
            if base_url is None:
                self._reset(user)
            else:
                self._reset(user + '|' + base_url)
            org = self._client.get_organization(user)
            members = list(org.get_members(**kwargs))
            for index, member in enumerate(members):
                result.append(user_to_dict(user=member))
                if (index + 1) >= limit:
                    break
        except UnknownObjectException:
            # Not an organization: return the single matching user instead.
            member = self._client.get_user(user)
            result.append(user_to_dict(user=member))
        return result
89a5c2838a442d78a5960f630f0b8be24007cf8e | 1,405 | py | Python | src/calrissian/regularization/regularize_orthogonal.py | awlange/brainsparks | 05baff28da347172083672c940406f5696893201 | [
"MIT"
] | 3 | 2015-10-30T04:04:02.000Z | 2020-02-13T19:08:42.000Z | src/calrissian/regularization/regularize_orthogonal.py | awlange/brainsparks | 05baff28da347172083672c940406f5696893201 | [
"MIT"
] | null | null | null | src/calrissian/regularization/regularize_orthogonal.py | awlange/brainsparks | 05baff28da347172083672c940406f5696893201 | [
"MIT"
] | null | null | null | import numpy as np
class RegularizeOrthogonal(object):
    """
    Orthogonality regularizer: penalizes the absolute cosine similarity
    between the (L2-normalized) transposed weight rows of each layer.
    """

    def __init__(self, coeff_lambda=0.0):
        # Overall strength of the regularization term.
        self.coeff_lambda = coeff_lambda

    def cost(self, layers):
        """Return coeff_lambda * sum over ordered pairs j != k of |u_j . u_k|."""
        total = 0.0
        for layer in layers:
            rows = layer.w.transpose()
            n_out = layer.output_size
            # Normalize each row once up front instead of inside the pair loop.
            units = [rows[j] / np.sqrt(rows[j].dot(rows[j])) for j in range(n_out)]
            for j in range(n_out):
                for k in range(n_out):
                    if k != j:
                        total += np.abs(units[j].dot(units[k]))
        return self.coeff_lambda * total

    def cost_gradient(self, layers, dc_db, dc_dw):
        """Accumulate d(cost)/dw into dc_dw in place; dc_db passes through unchanged."""
        for idx, layer in enumerate(layers):
            rows = layer.w.transpose()
            grad_t = np.zeros_like(rows)
            n_out = layer.output_size
            for j in range(n_out):
                norm_j = np.sqrt(rows[j].dot(rows[j]))
                unit_j = rows[j] / norm_j
                # Derivative factor of the normalized row wrt the raw row.
                proj = 2 * (np.eye(len(unit_j)) - np.outer(unit_j, unit_j)) / norm_j
                for k in range(n_out):
                    if k == j:
                        continue
                    unit_k = rows[k] / np.sqrt(rows[k].dot(rows[k]))
                    grad_t[j] += unit_k.dot(proj) * np.sign(unit_j.dot(unit_k))
            dc_dw[idx] += self.coeff_lambda * grad_t.transpose()
        return dc_db, dc_dw
89a6fed9b1397a9b14c141ea4b85328623a63382 | 7,839 | py | Python | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/util.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/util.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | null | null | null | data/external/repositories/113677/KaggleBillionWordImputation-master/scripts/util.py | Keesiu/meta-kaggle | 87de739aba2399fd31072ee81b391f9b7a63f540 | [
"MIT"
] | 1 | 2019-12-04T08:23:33.000Z | 2019-12-04T08:23:33.000Z | import sys, bisect
from collections import defaultdict
from itertools import islice, izip
import numpy as np
from scipy.misc import logsumexp
from scipy.spatial import distance
import Levenshtein
# Tokens treated as punctuation when classifying candidate words.
PUNCTUATION = set(("'", '"', ',', '.', '!', '?', ';', ':', '-', '--', '(', ')',
                   '/', '_', '\\', '+', '<', '>', '|', '@', '#', '$', '%', '^',
                   '&', '*', '[', ']', '{', '}'))
# Recognized part-of-speech tags (Penn-Treebank-style tag set).
POS_TAGS = set(('UH','WP$','PDT','RBS','LS','EX','WP','$','SYM','RP','CC','RBR','VBG','NNS','CD','PRP$','MD','DT','NNPS','VBD','IN','JJS','WRB','VBN','JJR','WDT','POS','TO','NNP','JJ','RB','VB','FW','PRP','VBZ','NN','VBP'))
# Placeholder token for out-of-vocabulary words.
UNKNOWN = '<unknown>'
def is_punctuation(word):
    """True if *word* is one of the recognized punctuation tokens."""
    return word in PUNCTUATION
def is_number(word):
    """Return True if *word* parses as a float (e.g. "3", "-2.5", "1e3").

    Catches only the conversion errors float() can raise, instead of the
    original bare except that also swallowed KeyboardInterrupt etc.
    """
    try:
        float(word)
        return True
    except (ValueError, TypeError):
        # ValueError: non-numeric string; TypeError: non-string/number input.
        return False
def is_pos_tag(word):
    """True if *word* is a recognized part-of-speech tag."""
    return word in POS_TAGS
def window(seq, n=2):
    """Yield successive overlapping n-tuples from *seq* (a sliding window).

    s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...; yields nothing when
    *seq* has fewer than n items.
    """
    iterator = iter(seq)
    current = tuple(islice(iterator, n))
    if len(current) == n:
        yield current
    for item in iterator:
        current = current[1:] + (item,)
        yield current
def tokenize_words(line, delim=' '):
    """Split *line* on *delim* after stripping trailing whitespace/newline."""
    stripped = line.rstrip()
    return stripped.split(delim)
def pos_tag(word):
    """Return the POS-tag suffix of a "token_TAG" string.

    Everything after the last '_' is returned; a word with no underscore
    is returned unchanged.
    """
    return word.rpartition('_')[2] if '_' in word else word
def ngram_frequencies(istream, n=1):
    """Count n-gram (word tuple) frequencies over the lines of *istream*.

    Python 2 code (print-to-stderr statement). Returns a defaultdict
    mapping n-word tuples to their counts.
    """
    counts = defaultdict(int)
    for i, line in enumerate(istream):
        if i % 100000 == 0:
            # Progress heartbeat every 100k lines.
            print >>sys.stderr, i
        words = tokenize_words(line)
        for ngram in window(words, n):
            counts[ngram] += 1
    return counts
def words2ids(words, idmap):
    """Map each word to a stable integer id, extending *idmap* for new words.

    Unseen words are assigned the next id (current size of *idmap*);
    *idmap* is mutated in place.
    """
    # setdefault inserts len(idmap) (computed before insertion) only for
    # unseen words — exactly the original if-not-in-then-assign logic.
    return [idmap.setdefault(word, len(idmap)) for word in words]
def ngram_frequencies2(istream, n=1):
    """Memory-lean variant of ngram_frequencies: interns words as integer ids.

    Python 2 code (print statement, dict.iteritems). Returns
    (counts, id2word) where counts maps id-tuples to frequencies and
    id2word maps each id back to its word.
    """
    unigrams = dict()
    counts = defaultdict(int)
    for i, line in enumerate(istream):
        if i % 100000 == 0:
            print >>sys.stderr, "Line %d (%d 1-grams, %d %d-grams)" \
                % (i, len(unigrams), len(counts), n)
        words = tokenize_words(line)
        ids = words2ids(words, unigrams)
        for ngram in window(ids, n):
            counts[ngram] += 1
    # Invert the word->id map, then drop it to free memory.
    id2word = {v: k for k, v in unigrams.iteritems()}
    del unigrams
    return counts, id2word
def load_vocab(vocab_file):
    """Parse "word<TAB>count" lines into a {word: int(count)} dict."""
    vocab = {}
    for line in vocab_file:
        word, count = line.strip().split('\t')
        vocab[word] = int(count)
    return vocab
def prune_vocab(vocab, n):
    """Keep only the *n* most frequent words of *vocab* ({word: count}).

    Python 2 code (print statements, dict.iteritems). Logs what fraction
    of the vocabulary and of the total word mass is retained.
    """
    nwords = sum(v for v in vocab.itervalues())
    nvocab = len(vocab)
    print >>sys.stderr, "Input has nwords = %s, vocab size = %d" \
        % (nwords, nvocab)
    # Sort (count, word) pairs descending and keep the top n.
    vocab = [(v,k) for k,v in vocab.iteritems()]
    vocab = list(reversed(sorted(vocab)))
    vocab = vocab[:n]
    vocab = {k: v for v, k in vocab}
    nremaining = sum(v for v in vocab.itervalues())
    percent_kept = float(len(vocab)) / nvocab
    percent_mass = float(nremaining) / nwords
    print >>sys.stderr, "Keeping %d words (%.2f%% of vocab, %.2f%% of mass)" \
        % (len(vocab), 100*percent_kept, 100*percent_mass)
    return vocab
def score(golden, predicted):
    """Mean Levenshtein edit distance between paired reference/prediction
    strings (the Kaggle competition metric).

    NOTE(review): raises ZeroDivisionError on empty input — confirm callers
    never pass empty iterables.
    """
    total_d = 0.0
    n = 0
    for ref, pred in izip(golden, predicted):
        total_d += Levenshtein.distance(ref, pred)
        n += 1
    return total_d / n
def estimate_probabilities(ngrams):
    """Convert raw n-gram counts into log10 probabilities (no smoothing).

    Python 2 code (print statements, dict.iteritems). The printed total
    probability is a sanity check that should be ~1.0.
    """
    # no smoothing; if we didn't see it in train, best not insert
    ntotal = float(sum(ngrams.itervalues()))
    print "%d total syntactic ngrams" % ntotal
    p = {k: np.log10(v/ntotal) for k, v in ngrams.iteritems()}
    print "Total probability = %f" % sum(10.**v for v in p.itervalues())
    return p

# Alias kept for callers that use the older name.
normalize_ngrams = estimate_probabilities
class Word2Vec(object):
    """In-memory word-embedding matrix with nearest-neighbor queries.

    Python 2 code (print statements; `map` used as an eager list).
    """
    def __init__(self, words, V):
        self.words = words
        # word -> row index into the embedding matrix V
        self.word_to_id = {w: i for i, w in enumerate(self.words)}
        self.V = V

    @classmethod
    def load(cls, istream):
        """Parse word2vec text format: a "nwords dim" header line, then one
        "word v1 v2 ..." line per word."""
        # first line indicates # words and dimension of vectors
        header = istream.readline().rstrip().split()
        nwords = int(header[0])
        d = int(header[1])
        print >>sys.stderr, "Allocating %dx%d word vector matrix" \
            % (nwords, d)
        words = []
        V = np.zeros((nwords,d), dtype=np.float32)
        # subsequent lines have word and vector
        print >>sys.stderr, "Loading word vectors"
        for i, line in enumerate(istream):
            entry = line.rstrip().split()
            word = entry[0]
            words.append(word)
            V[i] = map(float, entry[1:])
            if i % 500000 == 0: print >>sys.stderr, i
        return cls(words, V)

    def get(self, word):
        '''get vector for word'''
        if word not in self.word_to_id:
            raise ValueError("Word2Vec does not contain '%s'" % word)
        id = self.word_to_id[word]
        return self.V[id]

    def nearest(self, word, indices=None):
        '''yield words in ascending order of distance to @word'''
        # compute distance from word to all other words
        # too much memory to precompute all of these ahead of time
        # and vector dimension is too large for a KD-tree to be much help
        word_vec = np.array(self.get(word), ndmin=2)
        V = self.V if indices is None else self.V[indices]
        d = distance.cdist(word_vec, V)[0]
        # NOTE(review): when `indices` is given, argsort positions index the
        # reduced matrix but are used to index self.words directly — this
        # looks like it should be self.words[indices[i]]; verify.
        for i in np.argsort(d):
            w = self.words[i]
            # element 0 is this word (d=0) if this word is in indices
            # but not this word if this word is not in indices
            if w == word: continue
            yield w
class Prediction(object):
    """One parsed per-word prediction record.

    The trailing *args hold four consecutive groups of keep_top_n log10
    probabilities; all *_posterior / *_ratio properties operate on those
    log values. Python 2 code (`map`/`xrange` used eagerly in parse()).
    """
    keep_top_n = 5

    def __init__(self, word, locations, Z, Z_location, *args):
        self.word = word
        self.locations = locations
        self.Z = Z
        self.Z_location = Z_location
        # Slice the flat *args into its four fixed-width groups.
        self.p_anywhere = args[:self.keep_top_n]
        self.p_at_location = args[self.keep_top_n:2*self.keep_top_n]
        self.p_at_other_location = args[2*self.keep_top_n:3*self.keep_top_n]
        self.p_surrounding = args[3*self.keep_top_n:]
        #assert self.p_anywhere[0] == self.p_at_location[0]
        #assert self.p_at_location[0] != self.p_at_other_location[0]

    @property
    def location(self):
        # Best-ranked insertion location.
        return self.locations[0]

    @property
    def order(self):
        return len(self.p_surrounding)

    @property
    def location_posterior(self):
        # Values are log10; exponentiate the difference of log-sums.
        return 10.**(self.Z_location - self.Z)

    @property
    def word_posterior(self):
        return 10.**(self.p_at_location[0] - self.Z)

    @property
    def location_ratio(self):
        return self.p_at_location[0] - self.p_at_other_location[0]

    @property
    def word_ratio(self):
        # Margin between the top-1 and top-2 word at the chosen location.
        return self.p_at_location[0] - self.p_at_location[1]

    @classmethod
    def parse(cls, line):
        """Parse one tab-separated record: word, locations, then floats."""
        entry = line.rstrip().split('\t')
        word = entry[0]
        # locations
        # NOTE(review): entry[1:cls.keep_top_n] takes only keep_top_n - 1
        # fields — confirm whether this off-by-one is intended.
        loc = map(int, entry[1:cls.keep_top_n])
        # probabilities
        for i in xrange(cls.keep_top_n+1, len(entry)):
            entry[i] = float(entry[i])
        return cls(word, loc, *entry[cls.keep_top_n+1:])
class TopK(object):
    '''Keep track of the top-k objects (largest values seen so far).

    Values are stored negated, in ascending order, so self.values[0]
    corresponds to the best (largest) original value and unfilled slots
    hold +inf. Iteration yields (thing, stored_value) pairs, where
    stored_value == -original_value, matching what update() expects.
    '''

    def __init__(self, n):
        self.things = [None] * n
        self.values = [float('inf')] * n

    def add(self, thing, value):
        """Offer (thing, value); keep it only if it ranks in the top n."""
        i = bisect.bisect(self.values, -value)
        if i < len(self.values):
            # Insert at the sorted position and evict the current worst
            # entry. (The previous code overwrote slot i directly, which
            # dropped the entry it displaced instead of the worst one.)
            self.values.insert(i, -value)
            self.things.insert(i, thing)
            self.values.pop()
            self.things.pop()

    def update(self, other):
        """Merge another TopK's (thing, stored-value) pairs into this one."""
        for thing, stored in other:
            # `stored` is already negated, so negate again to recover the
            # original value expected by add().
            self.add(thing, -stored)

    def __iter__(self):
        # zip (not itertools.izip) works on both Python 2 and 3.
        return iter(zip(self.things, self.values))
89a7d7559cda29135a7e7a156417c45617de279f | 485 | py | Python | Search_DnaA_trios/9_adjust_box_mismatch.py | DongMeiJing/DnaA-trios | 573c8664515989c2f1f53b7fcc1c3fb928aadbce | [
"Apache-2.0"
] | null | null | null | Search_DnaA_trios/9_adjust_box_mismatch.py | DongMeiJing/DnaA-trios | 573c8664515989c2f1f53b7fcc1c3fb928aadbce | [
"Apache-2.0"
] | null | null | null | Search_DnaA_trios/9_adjust_box_mismatch.py | DongMeiJing/DnaA-trios | 573c8664515989c2f1f53b7fcc1c3fb928aadbce | [
"Apache-2.0"
] | null | null | null | import pandas as pd
# Post-process the spacer-length table: rows whose DnaA-box motif contains
# '[' (presumably marking a degenerate/ambiguous base — TODO confirm) are
# penalized by an extra 0.5 mismatch on both boxes.
file = r'8_calculate_spacer_len.csv'
with open(file, 'r') as f:
    data = pd.read_csv(f)
for i in data.index:
    box_motif = data.at[i, 'box motif']
    if '[' in box_motif:
        data.at[i, 'Upstream mismatch'] = data.at[i, 'Upstream mismatch'] + 0.5
        data.at[i, 'Downstream mismatch'] = data.at[i,
                                                    'Downstream mismatch'] + 0.5
data.to_csv('9_adjust_box_mismatch.csv', index=False)
| 30.3125 | 81 | 0.562887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.315464 |
89a8b28d63bb94947dfc6a9846e9c93c4d60b60d | 845 | py | Python | creatDjangoApp1/accounts/migrations/0006_auto_20201130_2119.py | imxiaow/django-xw-project | 06d97b7d3701422b08720e00e87f6c471461c19a | [
"MIT"
] | 1 | 2020-11-25T17:33:31.000Z | 2020-11-25T17:33:31.000Z | creatDjangoApp1/accounts/migrations/0006_auto_20201130_2119.py | imxiaow/django-xw-project | 06d97b7d3701422b08720e00e87f6c471461c19a | [
"MIT"
] | null | null | null | creatDjangoApp1/accounts/migrations/0006_auto_20201130_2119.py | imxiaow/django-xw-project | 06d97b7d3701422b08720e00e87f6c471461c19a | [
"MIT"
] | null | null | null | # Generated by Django 3.1.3 on 2020-11-30 21:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0005_auto_20201130_1450'),
]
operations = [
migrations.AddField(
model_name='order',
name='note',
field=models.CharField(max_length=200, null=True),
),
migrations.AlterField(
model_name='order',
name='status',
field=models.CharField(choices=[('Pending', 'Pending'), ('Out for delivery', 'Out for delivery'), ('Delivered', 'Delivered')], max_length=200, null=True),
),
migrations.AlterField(
model_name='product',
name='description',
field=models.CharField(blank=True, max_length=200, null=True),
),
]
| 29.137931 | 166 | 0.581065 | 752 | 0.889941 | 0 | 0 | 0 | 0 | 0 | 0 | 208 | 0.246154 |
89a8d67890dba5fcad6eb3055086bfd2d086adc8 | 647 | py | Python | HelloWorld/guessinggame.py | zahraaliaghazadeh/python | 2f2d0141a916c99e8724f803bd4e5c7246a7a02e | [
"MIT"
] | null | null | null | HelloWorld/guessinggame.py | zahraaliaghazadeh/python | 2f2d0141a916c99e8724f803bd4e5c7246a7a02e | [
"MIT"
] | null | null | null | HelloWorld/guessinggame.py | zahraaliaghazadeh/python | 2f2d0141a916c99e8724f803bd4e5c7246a7a02e | [
"MIT"
] | null | null | null | # answer = 5
# print("please guess number between 1 and 10: ")
# guess = int(input())
#
# if guess < answer:
# print("Please guess higher")
# elif guess > answer:
# print ( "Please guess lower")
# else:
# print("You got it first time")
answer = 5
print ("Please guess number between 1 and 10: ")
guess = int(input())
# when there is : we indent the code
if guess != answer:
if guess < answer:
print("Please guess higher")
guess = int(input())
if guess == answer:
print("Well done, you guessed it")
else:
print("Sorry, you have not guessed correctly")
else:
print("You got it first time")
| 23.107143 | 54 | 0.615147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 424 | 0.655332 |
89a90718d7e650fc2ad039e8e167881489e64001 | 2,820 | py | Python | spandex/tests/test_io.py | UDST/spandex | 2f485b190d521bc84e9a66d71c8161b5214570d8 | [
"BSD-3-Clause"
] | 21 | 2015-09-24T08:20:13.000Z | 2020-08-10T16:15:03.000Z | spandex/tests/test_io.py | UDST/spandex | 2f485b190d521bc84e9a66d71c8161b5214570d8 | [
"BSD-3-Clause"
] | 3 | 2018-05-22T21:04:48.000Z | 2018-05-30T20:40:44.000Z | spandex/tests/test_io.py | UDST/spandex | 2f485b190d521bc84e9a66d71c8161b5214570d8 | [
"BSD-3-Clause"
] | 14 | 2015-09-21T17:59:02.000Z | 2020-05-06T05:12:40.000Z | import numpy as np
import pandas as pd
from pandas.util import testing as pdt
import pytest
from spandex import TableFrame
from spandex.io import db_to_df, df_to_db
def test_tableframe(loader):
    """A TableFrame exposes table columns as pandas objects, cached or not."""
    table = loader.tables.sample.hf_bg
    expected_columns = set(table.__table__.columns.keys())
    for use_cache in (False, True):
        frame = TableFrame(table, index_col='gid', cache=use_cache)
        assert isinstance(frame.index, pd.Index)
        row_count = len(frame)
        assert row_count > 1
        assert set(frame.columns) == expected_columns
        for col in frame.columns:
            if col == 'gid':
                continue
            # With caching on, a column is cached only after first access.
            if use_cache:
                assert col not in frame._cached.keys()
            assert isinstance(frame[col], pd.Series)
            if use_cache:
                assert col in frame._cached.keys()
            assert isinstance(getattr(frame, col), pd.Series)
        subset = frame[['objectid']]
        assert isinstance(subset, pd.DataFrame)
        assert len(subset) == row_count
        assert set(subset.columns) == {'objectid'}
        assert np.issubdtype(subset.objectid.dtype, int)
def test_sim_export(loader):
    """Round-trip an UrbanSim-computed table through the database.

    Registers an input table, derives an output table with computed columns,
    exports it with df_to_db, reads it back with db_to_df, and checks the
    two frames match.
    """
    # Try importing the UrbanSim simulation framework, otherwise skip test.
    sim = pytest.importorskip('urbansim.sim.simulation')

    # Register input parcels table.
    parcels = loader.tables.sample.heather_farms
    parcels_in = TableFrame(parcels, index_col='gid')
    sim.add_table('parcels_in', parcels_in, copy_col=False)

    # Register output parcels table, indexed by parcel_id.
    @sim.table()
    def parcels_out(parcels_in):
        return pd.DataFrame(index=parcels_in.parcel_id)

    # Specify default table for output columns as decorator.
    out = sim.column('parcels_out')

    # Specify some output columns. The string defaults (e.g. 'parcels_in.puid')
    # are UrbanSim column bindings resolved by the framework, not literal values.
    @out
    def apn(apn='parcels_in.puid'):
        return apn.groupby(parcels_in.parcel_id).first().astype(str)

    @out
    def county_id():
        # Constant column; broadcast to every row by UrbanSim.
        return 13

    @out
    def area(acr='parcels_in.parcel_acr'):
        # 4047 ~= square metres per acre — presumably an acre->m^2 conversion.
        return 4047. * acr.groupby(parcels_in.parcel_id).median()

    # Register model to export output table to database.
    @sim.model()
    def export(parcels_out):
        schema = loader.tables.sample
        df_to_db(parcels_out.to_frame(), 'parcels_out', schema=schema)

    # Inspect output table before running the export model.
    column_names = ['apn', 'county_id', 'area']
    parcels_out_df1 = sim.get_table('parcels_out').to_frame()
    assert set(parcels_out_df1.columns) == set(column_names)
    assert parcels_out_df1.county_id.unique() == [13]

    # Export table to database and import back to compare.
    sim.run(['export'])
    parcels_out_table = loader.tables.sample.parcels_out
    parcels_out_df2 = db_to_df(parcels_out_table, index_col='parcel_id')
    pdt.assert_frame_equal(parcels_out_df1[column_names],
                           parcels_out_df2[column_names])
| 34.814815 | 75 | 0.664184 | 0 | 0 | 0 | 0 | 516 | 0.182979 | 0 | 0 | 541 | 0.191844 |
89aa44250287866cbb838c9fe67275c1ccae22e7 | 2,471 | py | Python | cohesity_management_sdk/models/vserver_network_interface.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/vserver_network_interface.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | cohesity_management_sdk/models/vserver_network_interface.py | sachinthakare-cohesity/management-sdk-python | c95f67b7d387d5bab8392be43190e598280ae7b5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
class VserverNetworkInterface(object):

    """Implementation of the 'Vserver Network Interface.' model.

    Describes a logical network interface on a NetApp Vserver. The
    interface's IP address is the mount point for a specific data
    protocol, such as NFS or CIFS.

    Attributes:
        data_protocols (list of DataProtocolEnum): Array of Data Protocols
            supported by this interface ('kNfs', 'kCifs', 'kIscsi', 'kFc',
            'kFcache', 'kHttp', 'kNdmp', 'kManagement').
        ip_address (string): The IP address of this interface.
        name (string): The name of this interface.

    """

    # Mapping from Model property names to API property names.
    _names = {
        "data_protocols":'dataProtocols',
        "ip_address":'ipAddress',
        "name":'name'
    }

    def __init__(self, data_protocols=None, ip_address=None, name=None):
        """Constructor for the VserverNetworkInterface class"""
        self.data_protocols = data_protocols
        self.ip_address = ip_address
        self.name = name

    @classmethod
    def from_dictionary(cls, dictionary):
        """Creates an instance of this model from a dictionary.

        Args:
            dictionary (dictionary): A dictionary representation of the
                object, as deserialized from the server's response. Keys
                MUST match property names in the API description.

        Returns:
            object: An instance of this structure class, or None when no
            dictionary was provided.

        """
        if dictionary is None:
            return None

        # Pull each API field out of the payload; missing keys become None.
        return cls(dictionary.get('dataProtocols'),
                   dictionary.get('ipAddress'),
                   dictionary.get('name'))
| 33.391892 | 81 | 0.632942 | 2,411 | 0.975718 | 0 | 0 | 850 | 0.34399 | 0 | 0 | 1,738 | 0.703359 |
89ac931b692c43514c36ae03212d4c0af12bbef1 | 4,877 | py | Python | clld/lib/wordpress.py | Woseseltops/clld | 5ba065f35b7e6f68b8638d86550e6f0f597ff02d | [
"MIT"
] | 1 | 2019-08-12T15:43:56.000Z | 2019-08-12T15:43:56.000Z | clld/lib/wordpress.py | Woseseltops/clld | 5ba065f35b7e6f68b8638d86550e6f0f597ff02d | [
"MIT"
] | null | null | null | clld/lib/wordpress.py | Woseseltops/clld | 5ba065f35b7e6f68b8638d86550e6f0f597ff02d | [
"MIT"
] | null | null | null | """
Client for the xmlrpc API of a wordpress blog.
.. note::
we ignore blog_id altogether, see
http://joseph.randomnetworks.com/archives/2008/06/10/\
blog-id-in-wordpress-and-xml-rpc-blog-apis/
thus, rely on identifying the appropriate blog by xmlrpc endpoint.
"""
import re
import xmlrpclib
import requests
XMLRPC_PATH = 'xmlrpc.php'
def sluggify(phrase):
    """Turn a phrase into a URL-friendly slug.

    Lowercases, trims surrounding whitespace, and collapses internal runs
    of whitespace into single hyphens.

    >>> assert sluggify('a and B') == 'a-and-b'
    """
    phrase = phrase.lower().strip()
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on modern Pythons).
    phrase = re.sub(r'\s+', '-', phrase)
    return phrase
class Client(object):
    """client to a wpmu blog

    provides a unified interface to functionality called over xmlrpc or plain http

    NOTE(review): uses the Python 2 `xmlrpclib` module (and `xmlrpclib.False`/
    `xmlrpclib.True`), so this class targets Python 2.

    >>> c = Client('blog.example.org', 'user', 'password')
    >>> assert c.service_url == 'http://blog.example.org/xmlrpc.php'
    """
    def __init__(self, url, user, password):
        """Normalise the endpoint URL and create the XML-RPC proxy.

        url may be given with or without scheme and with or without the
        trailing xmlrpc.php path; both are filled in when missing.
        """
        self.user = user
        self.password = password
        if not url.startswith('http://') and not url.startswith('https://'):
            url = 'http://' + url
        if not url.endswith(XMLRPC_PATH):
            if not url.endswith('/'):
                url += '/'
            url += XMLRPC_PATH
        self.service_url = url
        self.server = xmlrpclib.Server(self.service_url)
        # Blog root URL: the service URL with the xmlrpc.php suffix stripped.
        self.base_url = self.service_url.replace(XMLRPC_PATH, '')

    def get_post(self, id):  # pragma: no cover
        """Fetch a single post by id via the metaWeblog API."""
        return self.server.metaWeblog.getPost(id, self.user, self.password)

    def get_authors(self):  # pragma: no cover
        """Return the blog's authors via the wp API."""
        return self.server.wp.getAuthors(0, self.user, self.password)

    def get_recent_posts(self, number_of_posts):  # pragma: no cover
        """Return the most recent posts via the metaWeblog API."""
        return self.server.metaWeblog.getRecentPosts(
            0, self.user, self.password, number_of_posts)

    def create_post(self,
                    title,
                    content,
                    categories=None,
                    published=False,
                    date=None,
                    tags='',
                    custom_fields=None,
                    **kwargs):
        """Create a new post and return its id.

        categories: optional list of category dicts (see set_categories);
            assigned after the post is created.
        published: publish immediately when True, save as draft otherwise.
        date: optional creation date (set both GMT and local fields).
        tags: string or list/tuple of strings (joined with commas).
        custom_fields: optional dict, converted to the API's list of
            {key, value} dicts.
        Any extra keyword arguments are merged into the post struct as-is.
        """
        # The API expects xmlrpc boolean singletons, not Python bools.
        published = [xmlrpclib.False, xmlrpclib.True][int(published)]
        struct = dict(title=title, description=content)
        if date:
            struct['date_created_gmt'] = date
            struct['dateCreated'] = date
        if tags:
            if isinstance(tags, (list, tuple)):
                tags = ','.join(tags)
            struct['mt_keywords'] = tags
        if custom_fields is not None:
            struct['custom_fields'] = [
                dict(key=key, value=value) for key, value in custom_fields.items()]
        struct.update(kwargs)
        post_id = self.server.metaWeblog.newPost(
            '', self.user, self.password, struct, published)
        if categories:
            self.set_categories(categories, post_id)
        return post_id

    def get_categories(self, name=None):
        """Return the blog's categories, optionally filtered by exact name.

        Each returned dict is augmented with 'name' and 'id' aliases for the
        API's 'categoryName' and 'categoryId' keys.
        """
        res = []
        for c in self.server.wp.getCategories('', self.user, self.password):
            if name:
                if c['categoryName'] == name:
                    res.append(c)
            else:
                res.append(c)
        for c in res:
            c['name'] = c['categoryName']
            c['id'] = c['categoryId']
        return res

    def set_categories(self, categories, post_id=None):
        """Ensure categories exist and optionally assign them to a post.

        categories: list of dicts with at least a 'name' key; optional
            'parent_id', 'description' and 'slug' are forwarded when a
            category has to be created.
        Returns a mapping of category name -> category id.
        """
        existing_categories = dict(
            [(c['categoryName'], c) for c in self.get_categories()])
        cat_map = {}
        for cat in categories:
            if cat['name'] not in existing_categories:
                # Category missing on the server: create it first.
                struct = dict(name=cat['name'])
                for attr in ['parent_id', 'description', 'slug']:
                    if attr in cat:
                        struct[attr] = cat[attr]
                cat_map[cat['name']] = int(
                    self.server.wp.newCategory('', self.user, self.password, struct))
            else:
                cat_map[cat['name']] = int(existing_categories[cat['name']]['id'])
        if post_id:
            self.server.mt.setPostCategories(
                post_id,
                self.user,
                self.password,
                [dict(categoryId=cat_map[name]) for name in cat_map])
        return cat_map

    def get_post_id_from_path(self, path):
        """
        pretty hacky way to determine whether some post exists

        Fetches the page over plain HTTP and scrapes the post id, first from
        the hidden comment-form field, then from the post div's id attribute.
        Returns the id as int, or None when the page is missing or no
        unambiguous id can be scraped.
        """
        if not path.startswith(self.base_url):
            path = self.base_url + path
        res = requests.get(path)
        if res.status_code != 200:
            return None
        # Preferred: the comment form carries the post id in a hidden input.
        m = re.search(
            '\<input type\="hidden" name\="comment_post_ID" value\="(?P<id>[0-9]+)" \/\>',
            res.text)
        if m:
            return int(m.group('id'))
        else:
            # Fallback: a single <div class="post" id="post-N"> on the page.
            p = '\<div\s+class\=\"post\"\s+id\=\"post\-(?P<id>[0-9]+)\"\>'
            if len(re.findall(p, res.text)) == 1:
                m = re.search(p, res.text)
                return int(m.group('id'))
| 34.34507 | 90 | 0.541726 | 4,335 | 0.888866 | 0 | 0 | 0 | 0 | 0 | 0 | 1,116 | 0.228829 |
89ad3ad12dbbb7451941460b3c2d3ac8663e01da | 1,370 | py | Python | deprecated_examples_robust/multimedia/avmnist_MFM_robust.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | deprecated_examples_robust/multimedia/avmnist_MFM_robust.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | deprecated_examples_robust/multimedia/avmnist_MFM_robust.py | TianhaoFu/MultiBench | b174a3187124d6f92be1ff3b487eef292f7883bb | [
"MIT"
] | null | null | null | import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from training_structures.MFM import train_MFM,test_MFM
from fusions.common_fusions import Concat
from unimodals.MVAE import LeNetEncoder,DeLeNet
from unimodals.common_models import MLP
from torch import nn
import torch
from objective_functions.recon import recon_weighted_sum,sigmloss1dcentercrop
from datasets.avmnist.get_data_robust import get_dataloader
# Checkpoint path where train_MFM saves the best model.
filename = 'avmnist_MFM_robust_best.pt'
traindata, validdata, testdata, robustdata = get_dataloader('../../../../yiwei/avmnist/_MFAS/avmnist')

# Model hyperparameters.
channels = 6
classes = 10
n_latent = 200
fuse = Concat()

# One encoder/decoder pair per modality (image and audio spectrogram).
encoders = [LeNetEncoder(1, channels, 3, n_latent, twooutput=False).cuda(),
            LeNetEncoder(1, channels, 5, n_latent, twooutput=False).cuda()]
decoders = [DeLeNet(1, channels, 3, n_latent).cuda(),
            DeLeNet(1, channels, 5, n_latent).cuda()]
# Per-modality intermediates plus one for the fused representation.
intermediates = [MLP(n_latent, n_latent // 2, n_latent // 2).cuda(),
                 MLP(n_latent, n_latent // 2, n_latent // 2).cuda(),
                 MLP(2 * n_latent, n_latent, n_latent // 2).cuda()]
head = MLP(n_latent // 2, 40, classes).cuda()

# Reconstruction losses for the two modalities, equally weighted.
recon_loss = recon_weighted_sum([sigmloss1dcentercrop(28, 34), sigmloss1dcentercrop(112, 130)], [1.0, 1.0])

train_MFM(encoders, decoders, head, intermediates, fuse, recon_loss, traindata, validdata, 25, savedir=filename)

model = torch.load(filename)
print("Testing:")
test_MFM(model, testdata)
print("Robustness testing:")
# Fix: the original called the undefined name `test(...)` (NameError) and
# passed the clean test split; the robustness split loaded above was unused.
test_MFM(model, robustdata)
| 37.027027 | 149 | 0.806569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 100 | 0.072993 |
89b0fc36687c8a1bd3480464b1aa7545c9ee615d | 2,327 | py | Python | src/JyCnTwTranslator.py | jackychu0830/jy-cn-tw-translattor | 8ee4a367ba35fe5af43d07fc865bf6eea9ef4864 | [
"Apache-2.0"
] | 3 | 2021-04-10T03:48:34.000Z | 2021-07-21T08:39:54.000Z | src/JyCnTwTranslator.py | jackychu0830/jy-cn-tw-translattor | 8ee4a367ba35fe5af43d07fc865bf6eea9ef4864 | [
"Apache-2.0"
] | null | null | null | src/JyCnTwTranslator.py | jackychu0830/jy-cn-tw-translattor | 8ee4a367ba35fe5af43d07fc865bf6eea9ef4864 | [
"Apache-2.0"
] | 5 | 2021-04-27T06:45:59.000Z | 2022-03-28T12:44:49.000Z | import json
import os
from pygoogletranslation import Translator
JY_PATH = os.path.expanduser('~') + '/Movies/JianyingPro/videocut/'
def get_video_texts(filename):
    """
    Get all subtitle text content from a Jianying video draft file.
    :param filename: path to the draft's template.json file
    :return: all subtitle contents, in order, as a list
    """
    # Context manager guarantees the file is closed even if parsing fails.
    with open(filename, encoding='utf-8') as f:
        json_obj = json.load(f)
    return [text.get('content') for text in json_obj.get('materials').get('texts')]
def get_video_names(filename):
    """
    Get the draft (video) name from a Jianying video draft file.
    :param filename: path to the draft's template.json file
    :return: video name
    """
    # Context manager guarantees the file is closed even if parsing fails.
    with open(filename, encoding='utf-8') as f:
        json_obj = json.load(f)
    return json_obj.get('draft_name')
def set_video_texts(new_texts, filename):
    """
    Write translated texts back to a Jianying video draft file.
    :param new_texts: translated texts, in the same order they were read
    :param filename: path to the draft's template.json file
    """
    with open(filename, encoding='utf-8') as f:
        json_obj = json.load(f)
    # Overwrite each subtitle entry in place, preserving all other metadata.
    for i, new_text in enumerate(new_texts):
        json_obj['materials']['texts'][i]['content'] = new_text
    # ensure_ascii=False keeps the Chinese characters human-readable on disk.
    with open(filename, 'w', encoding='utf8') as json_file:
        json.dump(json_obj, json_file, ensure_ascii=False)
def do_translate(cn_texts):
    """
    Translate a batch of texts from Simplified (CN) to Traditional (TW) Chinese.
    :param cn_texts: Texts in CN
    :return: Texts in TW
    """
    results = Translator().translate(cn_texts, src='zh-cn', dest='zh-tw')
    return [item.text for item in results]
def do_single_translate(cn_text):
    """
    Translate one text from Simplified (CN) to Traditional (TW) Chinese.
    :param cn_text: Text in CN
    :return: Text in TW
    """
    translation = Translator().translate(cn_text, src='zh-cn', dest='zh-tw')
    return translation.text
def main():
    """Interactive entry point: translate one Jianying video's subtitles in place.

    Prompts for a video id, reads that draft's template.json, translates all
    subtitle texts from zh-cn to zh-tw, and writes them back to the same file.
    """
    jy_video = input('Please input video id: ')
    # Each Jianying draft lives under JY_PATH/<video id>/template.json.
    filename = JY_PATH + jy_video + '/template.json'
    cn_texts = get_video_texts(filename)
    print(cn_texts)
    tw_texts = do_translate(cn_texts)
    print(tw_texts)
    set_video_texts(tw_texts, filename)


# Script entry point.
if __name__=="__main__":
    main()
| 22.375 | 85 | 0.634293 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 845 | 0.363128 |
89b1a7985acccd182bc360d4370b40a665ed42c6 | 1,352 | py | Python | pygame_template.py | Pittsy24/pygame_template | b72397a37610fc267870151e0141de0d15793b7c | [
"MIT"
] | null | null | null | pygame_template.py | Pittsy24/pygame_template | b72397a37610fc267870151e0141de0d15793b7c | [
"MIT"
] | null | null | null | pygame_template.py | Pittsy24/pygame_template | b72397a37610fc267870151e0141de0d15793b7c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# By Joseph Pitts
import sys
import random
import pygame
from pygame.locals import *
class Colours:
    """Named RGB colour constants (0-255 per channel) for pygame drawing."""
    BLACK = (0, 0, 0)
    WHITE = (255, 255, 255)
    AQUA = (0, 255, 255)
    GREY = (128, 128, 128)
    NAVY = (0, 0, 128)
    SILVER = (192, 192, 192)
    GREEN = (0, 128, 0)
    OLIVE = (128, 128, 0)
    TEAL = (0, 128, 128)
    BLUE = (0, 0, 255)
    LIME = (0, 255, 0)
    PURPLE = (128, 0, 128)
    FUCHSIA = (255, 0, 255)
    MAROON = (128, 0, 0)
    RED = (255, 0, 0)
    YELLOW = (255, 255, 0)

    @staticmethod
    def RANDOM():
        """Return a random (r, g, b) tuple with each channel in 0..255.

        Fix: randrange's upper bound is exclusive, so the original
        randrange(0, 255) could never produce a full-intensity channel.
        """
        return (random.randrange(0, 256),
                random.randrange(0, 256),
                random.randrange(0, 256))
class PyGame(object):
    """Minimal pygame application skeleton: window setup plus an event/draw loop."""

    def __init__(self, width=640, height=480):
        """Initialise pygame and create the display surface.

        width, height: window size in pixels.
        """
        pygame.init()
        self.fps = 60
        self.fpsClock = pygame.time.Clock()
        self.width, self.height = width, height
        self.screen = pygame.display.set_mode((self.width, self.height))

    def game_loop(self):
        """Run one frame: clear the screen, drain events, flip, cap frame rate."""
        self.screen.fill(Colours.BLACK)
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        pygame.display.flip()
        # Fix: the original `fpsClock.tick(fps)` referenced undefined globals
        # (NameError); the clock and fps are instance attributes set in __init__.
        self.fpsClock.tick(self.fps)

    def run(self):
        """Block forever, running frames until the window is closed."""
        while True:
            self.game_loop()
| 23.719298 | 94 | 0.513314 | 1,217 | 0.900148 | 0 | 0 | 127 | 0.093935 | 0 | 0 | 37 | 0.027367 |
89b39f158be90401b4ca2e91fcd18a6a3a99d8f3 | 799 | py | Python | Python3/0234-Palindrome-Linked-List/soln-1.py | wyaadarsh/LeetCode-Solutions | 3719f5cb059eefd66b83eb8ae990652f4b7fd124 | [
"MIT"
] | 5 | 2020-07-24T17:48:59.000Z | 2020-12-21T05:56:00.000Z | Python3/0234-Palindrome-Linked-List/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | null | null | null | Python3/0234-Palindrome-Linked-List/soln-1.py | zhangyaqi1989/LeetCode-Solutions | 2655a1ffc8678ad1de6c24295071308a18c5dc6e | [
"MIT"
] | 2 | 2020-07-24T17:49:01.000Z | 2020-08-31T19:57:35.000Z | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def isPalindrome(self, head):
        """Return True when the singly linked list reads the same both ways.

        Reverses the second half in place (the list is left modified).

        :type head: ListNode
        :rtype: bool
        """
        # Empty and single-node lists are trivially palindromes.
        if head is None or head.next is None:
            return True
        # Slow/fast pointers: slow lands at the start of the second half.
        slow = fast = head
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        # Odd length: skip the middle node, it matches itself.
        if fast:
            slow = slow.next
        # Reverse the second half in place.
        reversed_half = None
        node = slow
        while node:
            node.next, reversed_half, node = reversed_half, node, node.next
        # Walk both halves in lockstep comparing values.
        left, right = head, reversed_half
        while right and left:
            if right.val != left.val:
                return False
            left, right = left.next, right.next
        return True
89b442a2aa83173bfb33cd89737f11e8446730f6 | 3,336 | py | Python | dataset/ucmayo4.py | GorkemP/labeled-images-for-ulcerative-colitis | 83dd4221e9bb6f4a441cafb6ddd74dad0d5f0e55 | [
"MIT"
] | 2 | 2022-03-15T19:59:15.000Z | 2022-03-17T07:37:08.000Z | dataset/ucmayo4.py | GorkemP/labeled-images-for-ulcerative-colitis | 83dd4221e9bb6f4a441cafb6ddd74dad0d5f0e55 | [
"MIT"
] | null | null | null | dataset/ucmayo4.py | GorkemP/labeled-images-for-ulcerative-colitis | 83dd4221e9bb6f4a441cafb6ddd74dad0d5f0e55 | [
"MIT"
] | null | null | null | import torch
from torch.utils.data import Dataset
from PIL import Image
import os
import glob
class UCMayo4(Dataset):
    """Ulcerative Colitis dataset grouped according to Endoscopic Mayo scoring system.

    Each immediate subfolder of root_dir is one class; every image inside a
    class folder becomes one sample. All images are loaded eagerly into
    memory in the constructor.
    """

    def __init__(self, root_dir, transform=None):
        """
        root_dir (string): Path to parent folder where class folders are located.
        transform (callable, optional): Optional transform to be applied
            on a sample.
        """
        self.class_names = []
        self.samples = []
        self.transform = transform

        # Sorted so class indices are deterministic across runs.
        subFolders = glob.glob(os.path.join(root_dir, "*"))
        subFolders.sort()
        # First pass: class folder names become the class labels.
        # NOTE(review): splitting on "/" assumes POSIX paths — os.path.basename
        # would be needed for Windows; verify intended platforms.
        for folder in subFolders:
            className = folder.split("/")[-1]
            self.class_names.append(className)

        self.number_of_class = len(self.class_names)

        # Second pass: eagerly load every image (load() forces the pixel data
        # off disk) and pair it with its class index.
        # NOTE(review): image_paths from glob are not sorted, so sample order
        # within a class is filesystem-dependent.
        for folder in subFolders:
            className = folder.split("/")[-1]
            image_paths = glob.glob(os.path.join(folder, "*"))

            for image_path in image_paths:
                image = Image.open(image_path)
                image.load()
                self.samples.append((image, self.class_names.index(className)))

    def __len__(self):
        # Total number of (image, label) samples.
        return len(self.samples)

    def __getitem__(self, idx):
        """Return the (image, class_index) pair at idx, with transform applied."""
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Copy so the transform never mutates the cached original image.
        sample_image = self.samples[idx][0].copy()
        if self.transform:
            sample_image = self.transform(sample_image)

        return (sample_image, self.samples[idx][1])
class UCMayo4Remission(Dataset):
    """
    Ulcerative Colitis dataset grouped according to Endoscopic Mayo scoring system.
    According to the remission list given in the constructor, it produces a binary
    label per sample: 1 when the sample's class index is in `remission`, else 0.
    """

    def __init__(self, root_dir, remission=[2, 3], transform=None):
        """
        Args:
            root_dir (string): Path to parent folder where class folders are located.
            remission (list): Class indices (as int) that will be labeled 1.
                NOTE(review): a mutable default argument; safe only because the
                list is never mutated here.
            transform (callable, optional): Optional transform to be applied on a sample.
        """
        self.number_of_class = 2
        self.class_names = []
        self.samples = []
        self.transform = transform

        # Sorted so class indices are deterministic across runs.
        subFolders = glob.glob(os.path.join(root_dir, "*"))
        subFolders.sort()
        # First pass: record the class folder names.
        # NOTE(review): splitting on "/" assumes POSIX paths.
        for folder in subFolders:
            className = folder.split("/")[-1]
            self.class_names.append(className)

        # Second pass: eagerly load every image and binarize the label.
        for folder in subFolders:
            className = folder.split("/")[-1]
            image_paths = glob.glob(os.path.join(folder, "*"))

            for image_path in image_paths:
                image = Image.open(image_path)
                image.load()
                label = 0
                # Label 1 for classes whose index appears in `remission`.
                if self.class_names.index(className) in remission:
                    label = 1
                self.samples.append((image, label))

    def __len__(self):
        # Total number of (image, label) samples.
        return len(self.samples)

    def __getitem__(self, idx):
        """Return the (image, binary_label) pair at idx, with transform applied."""
        if torch.is_tensor(idx):
            idx = idx.tolist()

        # Copy so the transform never mutates the cached original image.
        sample_image = self.samples[idx][0].copy()
        # TODO since all images are loaded at constructor, transform can be moved there too
        if self.transform:
            sample_image = self.transform(sample_image)

        return (sample_image, self.samples[idx][1])
| 32.38835 | 97 | 0.601019 | 3,238 | 0.970624 | 0 | 0 | 0 | 0 | 0 | 0 | 883 | 0.264688 |
89b51e36ce9bdb63aea671028674c6ceed809072 | 698 | py | Python | Python Programs/binarysea.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 77 | 2020-10-01T10:06:59.000Z | 2021-11-08T08:57:18.000Z | Python Programs/binarysea.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 46 | 2020-09-27T04:55:36.000Z | 2021-05-14T18:49:06.000Z | Python Programs/binarysea.py | Chibi-Shem/Hacktoberfest2020-Expert | 324843464aec039e130e85a16e74b76d310f1497 | [
"MIT"
] | 327 | 2020-09-26T17:06:03.000Z | 2021-10-09T06:04:39.000Z | print("linear search")
# Read the list to search from stdin.
si = int(input("\nEnter the size:"))
data = list()
for i in range(0, si):
    n = int(input())
    data.append(n)

cot = 0
print("\nEnter the number you want to search:")
val = int(input())

# Linear search: cot counts the elements examined before the match.
for i in range(0, len(data)):
    if data[i] == val:
        break
    else:
        cot = cot + 1
print(cot)  # linear search comparison count

# Binary search (assumes the numbers were entered in sorted order).
print("\nBinary Search")
cot = 0
beg = 0
# Fix: the original used end = len(data), which with its `beg < end` loop
# could terminate without ever comparing the last candidate.
end = len(data) - 1
mid = int((beg + end) / 2)
while beg <= end and val != data[mid]:
    if val > data[mid]:
        beg = mid + 1
    else:
        end = mid - 1
    mid = int((beg + end) / 2)
    cot = cot + 1
# Fix: the original tested the hardcoded literal 14 instead of val, and
# indexed data[mid] even when the list was empty (IndexError).
if data and val == data[mid]:
    print("\nDATA FOUND")
else:
    print("\nDATA NOT FOUND")
print(cot)
| 21.151515 | 48 | 0.54298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 144 | 0.206304 |
89b658139c87c2cd2b91a286d57e79315f172227 | 6,240 | py | Python | inferqueue.py | AndreasMerentitis/TfLambdaDemo-tfraw | 1fa3d7b170076b8afd5065f680ee305f232fbbf5 | [
"MIT"
] | null | null | null | inferqueue.py | AndreasMerentitis/TfLambdaDemo-tfraw | 1fa3d7b170076b8afd5065f680ee305f232fbbf5 | [
"MIT"
] | 5 | 2020-09-26T01:25:45.000Z | 2022-02-10T02:13:54.000Z | inferqueue.py | AndreasMerentitis/TfLambdaDemo-tfraw | 1fa3d7b170076b8afd5065f680ee305f232fbbf5 | [
"MIT"
] | null | null | null | try:
import unzip_requirements
except ImportError:
pass
import json
import os
import tarfile
import boto3
import tensorflow as tf
import numpy as np
import census_data
import logging
logger = logging.getLogger()
logger.setLevel(logging.WARNING)
FILE_DIR = '/tmp/'
BUCKET = os.environ['BUCKET']
import queue
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor
q = queue.Queue()
lambda_client = boto3.client('lambda')
def feed_the_workers(datapoints, spacing):
""" Outside actors sending in work to do """
count = 0
for datapoint in datapoints:
print(spacing)
count = count + 1
print(count)
q.put(datapoint)
return "DONE FEEDING"
def process_one_datapoint(executor, payload_one_item):
""" Process a single item """
payload_one_item_json = {}
payload_one_item_json['input'] = [payload_one_item]
payload_one_item_json['epoch'] = ''
payload_one_item_json = json.dumps(payload_one_item_json)
logging.warning('payload_one_item_json from process_one_datapoint is %s', payload_one_item_json)
predictions = executor.submit(lambda_client.invoke(
FunctionName='tflambdademo-dev-infer',
InvocationType='RequestResponse',
LogType='Tail',
Payload=payload_one_item_json)
)
logging.warning('predictions raw from process_one_datapoint is %s', predictions)
#logging.warning('predictions result from process_one_datapoint is %s', predictions.result())
responseFromChild = json.load(predictions['Payload'])
logging.warning('responseFromChild is %s', responseFromChild)
return responseFromChild
def inferqueueHandler(event, context):
body = json.loads(event.get('body'))
# Read in prediction data as dictionary
# Keys should match _CSV_COLUMNS, values should be lists
predict_input = body['input']
logging.warning('predict_input type is %s', type(predict_input))
logging.warning('predict_input is %s', predict_input)
# Read in epoch
epoch_files = body['epoch']
logging.warning('epoch_files is %s', epoch_files)
if isinstance(predict_input, list):
predict_datapoints = predict_input
else:
predict_datapoints = [predict_input]
logging.warning('predict_datapoints is %s', predict_datapoints)
results = []
results_datapoint_order = []
# We can use a with statement to ensure threads are cleaned up promptly
with concurrent.futures.ThreadPoolExecutor(max_workers=30) as executor:
# start a future for a thread which sends work in through the queue
future_to_datapoint = {
executor.submit(feed_the_workers, predict_datapoints, 0): 'FEEDER DONE'}
while future_to_datapoint:
# check for status of the futures which are currently working
done, not_done = concurrent.futures.wait(
future_to_datapoint, timeout=0.0,
return_when=concurrent.futures.FIRST_COMPLETED)
#done, not_done = concurrent.futures.wait(
# future_to_datapoint, timeout=0.0,
# return_when=concurrent.futures.ALL_COMPLETED)
# if there is incoming work, start a new future
while not q.empty():
# fetch a url from the queue
datapoint = q.get()
payload_one_item = datapoint
logging.warning('payload_one_item value is %s', payload_one_item)
# Start the load operation and mark the future with its datapoint
future_to_datapoint[executor.submit(process_one_datapoint, executor, payload_one_item)] = payload_one_item
# process any completed futures
for future in done:
datapoint = future_to_datapoint[future]
try:
logging.warning('In try loop')
logging.warning('In try loop future is %s', future)
if datapoint != 'FEEDER DONE':
print('In NOT FEEDER DONE')
data = future.result()
logging.warning('In try loop data1 is %s', data)
data = json.loads(data)
logging.warning('In try loop data2 is %s', data)
logging.warning('data value is %s', data)
results.append(data)
results_datapoint_order.append(datapoint)
except Exception as exc:
print('In Exception path')
print('exc: %s', exc)
print('%r generated an exception: %s' % (future, exc))
print('Finishing Exception path')
else:
if datapoint == 'FEEDER DONE':
data = future.result()
print(data)
else:
print('%r page is %d bytes' % (datapoint, len(data)))
# remove the now completed future
del future_to_datapoint[future]
datapoints_result_order = []
for item_in_list in results_datapoint_order:
datapoints_result = item_in_list['predict_datapoints']
datapoints_result_order.append(datapoints_result[0])
order_list_idx = []
for item_in_list in datapoints_result_order:
order_list_idx.append(predict_datapoints.index(item_in_list))
logging.warning('predict_datapoints value is %s', predict_datapoints)
logging.warning('results_datapoint_order value is %s', results_datapoint_order)
logging.warning('order_list_idx value is %s', order_list_idx)
results_ordered = [x for _,x in sorted(zip(order_list_idx,results))]
logging.warning('results value is %s', results)
logging.warning('results_ordered value is %s', results_ordered)
response = {
"statusCode": 200,
"body": json.dumps(results_ordered,
default=lambda x: x.decode('utf-8'))
}
return response
| 34.475138 | 122 | 0.616346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,659 | 0.265865 |
89b73d7d48109d66259c6829ecf5a9a833671f92 | 11,282 | py | Python | sailbot_ws/src/sailbot/sailbot/control_system.py | wpisailbot/sailbot21-22 | e4993b6077798e6df4f631d4f4cd69ef0b474b41 | [
"Apache-2.0"
] | 1 | 2022-01-25T18:26:17.000Z | 2022-01-25T18:26:17.000Z | sailbot_ws/src/sailbot/sailbot/control_system.py | wpisailbot/sailbot21-22 | e4993b6077798e6df4f631d4f4cd69ef0b474b41 | [
"Apache-2.0"
] | 1 | 2022-01-23T20:51:42.000Z | 2022-01-23T20:51:42.000Z | sailbot_ws/src/sailbot/sailbot/control_system.py | wpisailbot/sailbot21-22 | e4993b6077798e6df4f631d4f4cd69ef0b474b41 | [
"Apache-2.0"
] | 1 | 2021-11-18T21:15:15.000Z | 2021-11-18T21:15:15.000Z | from time import time
import rclpy
from rclpy.node import Node
import json
from std_msgs.msg import String, Float32, Int8, Int16
import sailbot.autonomous.p2p as p2p
from collections import deque
class ControlSystem(Node):  # Gathers data from some nodes and distributes it to others
    """ROS2 node that fuses RC, Airmar, and trim-tab telemetry and publishes
    trim-tab state/angle and ballast/rudder PWM commands.
    """

    def __init__(self):
        """Set up all subscriptions, publishers, and mutable state buffers."""
        super().__init__('control_system')
        # Create subcription to serial_rc topic
        self.serial_rc_subscription = self.create_subscription(
            String,
            'serial_rc',
            self.serial_rc_listener_callback,
            10)
        # Bare reference below — presumably to keep a handle / silence unused
        # warnings (common rclpy tutorial idiom); it has no runtime effect.
        self.serial_rc_subscription
        # Create subscription to airmar_data
        self.airmar_data_subscription = self.create_subscription(
            String,
            'airmar_data',
            self.airmar_data_listener_callback,
            10)
        self.airmar_data_subscription
        # Create subscription to tt_telemetry
        self.trim_tab_telemetry_subscription = self.create_subscription(
            Float32,
            'tt_telemetry',
            self.trim_tab_telemetry_listener_callback,
            10)
        self.trim_tab_telemetry_subscription
        # Create publisher to pwm_control
        self.pwm_control_publisher_ = self.create_publisher(String, 'pwm_control', 10)
        # Create publisher to trim_tab_control
        self.trim_tab_control_publisher_ = self.create_publisher(Int8, 'tt_control', 10)
        self.trim_tab_angle_publisher_ = self.create_publisher(Int16, 'tt_angle', 10)
        # Create publisher to ballast_algorithnm_debug
        self.ballast_algorithnm_debug_publisher_ = self.create_publisher(String, 'ballast_algorithnm_debug', 10)
        # Create instance vars for subscribed topics to update
        self.serial_rc = {}
        self.airmar_data = {}
        self.trim_tab_status = {}
        # Create instance var for keeping queue of wind data
        self.lastWinds = []
        self.p2p_alg = None
        # Create instance var for keeping queue of roll data
        self.omega = deque(maxlen=4)
        self.alpha = deque(maxlen=3)
        self.lastRollAngle = deque(maxlen=4)
        # self.p2p_alg = None

    def serial_rc_listener_callback(self, msg):
        """Merge incoming RC channel values (JSON string) into self.serial_rc."""
        self.get_logger().info('Received msg: "%s"' % msg.data)
        msg_dict = json.loads(msg.data)
        for i in msg_dict:
            self.serial_rc[i] = msg_dict[i]

    def airmar_data_listener_callback(self, msg):
        """Merge incoming Airmar sensor values (JSON string) into self.airmar_data."""
        self.get_logger().info('Received msg: "%s"' % msg.data)
        msg_dict = json.loads(msg.data)
        for i in msg_dict:
            self.airmar_data[i] = msg_dict[i]

    def trim_tab_telemetry_listener_callback(self, msg):
        """Record the trim tab's reported wind direction."""
        self.get_logger().info('Received msg: "%s"' % msg.data)
        try:
            self.trim_tab_status['wind_dir'] = msg.data
        except Exception as e:
            self.get_logger().error(str(e))

    def update_winds(self, relative_wind):
        """Push a new wind reading into the rolling window and return its median.

        NOTE(review): when the reading repeats the previous one this returns
        None (bare return), which callers compare numerically — see
        find_trim_tab_state.
        """
        # Check we have new wind
        if len(self.lastWinds) != 0 and relative_wind == self.lastWinds[len(self.lastWinds) -1]:
            return
        # First add wind to running list (window of the last 10 readings)
        self.lastWinds.append(float(relative_wind))
        if len(self.lastWinds) > 10:
            self.lastWinds.pop(0)
        # Now find best trim tab state
        smooth_angle = self.median(self.lastWinds)
        return smooth_angle

    def find_trim_tab_state(self, relative_wind): #five states of trim
        """Map the smoothed relative wind angle to one of five trim tab states
        (0=max lift port, 1=max lift starboard, 2=max drag port,
        3=max drag starboard, 4=min lift) and publish it.
        """
        smooth_angle = self.update_winds(relative_wind)
        msg = Int8()
        # NOTE(review): smooth_angle may be None here (see update_winds),
        # which would raise TypeError in these comparisons.
        if 45.0 <= smooth_angle < 135:
            # Max lift port
            msg.data = (0)
        elif 135 <= smooth_angle < 180:
            # Max drag port
            msg.data = (2)
        elif 180 <= smooth_angle < 225:
            # Max drag starboard
            msg.data = (3)
        elif 225 <= smooth_angle < 315:
            # Max lift starboard
            msg.data = (1)
        else:
            # In irons, min lift
            msg.data = (4)
        self.trim_tab_control_publisher_.publish(msg)

    def make_json_string(self, json_msg):
        """Wrap a dict as a JSON-encoded std_msgs String message."""
        json_str = json.dumps(json_msg)
        message = String()
        message.data = json_str
        return message

    def median(self, lst):
        """Return the median of lst, or None for an empty list.

        Even length: mean of the two middle values; odd length: middle value.
        """
        n = len(lst)
        s = sorted(lst)
        return (sum(s[n//2-1:n//2+1])/2.0, s[n//2])[n % 2] if n else None

    def ballast_algorithm(self):
        """Choose a ballast servo angle from smoothed wind and roll and publish
        it as a PWM command on channel 12.
        """
        # Check wind angle, then check current tilt of boat, then adjust ballast accordingly
        if len(self.lastWinds) == 0:
            return
        self.lastRollAngle.append(self.airmar_data["roll"])
        smooth_angle = self.median(self.lastWinds)
        ballast_angle = 0
        #print("roll:" + self.airmar_data["roll"])
        # NOTE(review): lastRollAngle[-1] is the value appended just above, so
        # delta is always 0 — a previous sample (e.g. [-2]) was likely intended.
        delta = self.airmar_data["roll"] - self.lastRollAngle[-1]
        timeDifference = .5 #hypothetically - see main
        omega_n = delta/timeDifference
        self.omega.append(omega_n)
        alpha_n = self.omega[-1]/timeDifference
        self.alpha.append(alpha_n)
        #-- Logging ----------------
        # NOTE(review): publishes a raw Python str; rclpy String publishers
        # normally require a std_msgs String message (cf. make_json_string).
        self.ballast_algorithnm_debug_publisher_.publish("omega: " + str(omega_n) + " -- " + "alpha / acceleration: " + str(alpha_n) + "\n")
        #Account for a heavy tilt
        #-----------
        # Starboard tack
        if 0 < smooth_angle <= 180:
            # Go for 20 degrees
            if float(self.airmar_data["roll"]) > -12: #change to roll acc.
                #ballast_angle = 110
                ballast_angle = omega_n * 2
            elif float(self.airmar_data["roll"]) < -20: #change to roll acc.
                ballast_angle = 80
        #-----------
        # Port tack
        elif 180 < smooth_angle < 360:
            if float(self.airmar_data["roll"]) < 12:
                ballast_angle = 80
            elif float(self.airmar_data["roll"]) > 20:
                ballast_angle = 110
        ballast_json = {"channel": "12", "angle": ballast_angle}
        self.pwm_control_publisher_.publish(self.make_json_string(ballast_json))
def main(args=None):
    """ROS entry point: poll subscribers and drive trim tab, ballast, rudder.

    serial_rc["state2"] selects RC mode (>600) vs autonomous mode (<600);
    serial_rc["state1"] sub-selects manual trim / manual ballast within RC.
    """
    rclpy.init(args=args)
    control_system = ControlSystem()
    while rclpy.ok():
        # 0.5 s timeout; the ballast maths assumes roughly this loop period.
        rclpy.spin_once(control_system, timeout_sec=.5)
        # Now we have new vals from subscribers in:
        # control_system.serial_rc
        # control_system.airmar_data
        # control_system.trim_tab_status
        # Need to publish new values to both control topics based on new values
        # control_system.pwm_control_publisher_.publish() <----- i think both of these are notes from last year and have since been implemented
        # control_system.trim_tab_control_publisher_.publish() <----- i think both of these are notes from last year and have since been implemented
        #TODO ^^implement
        if len(control_system.serial_rc) < 2:
            pass # Don't have rc values
        elif float(control_system.serial_rc["state2"]) > 600: # in RC
            if float(control_system.serial_rc["state1"]) < 400:
                # Manual
                manual_angle = int((float(control_system.serial_rc["manual"]) / 2000) * 100) + 65
                state_msg = Int8()
                state_msg.data = 5
                angle_msg = Int16()
                angle_msg.data = manual_angle
                control_system.trim_tab_control_publisher_.publish(state_msg)
                control_system.trim_tab_angle_publisher_.publish(angle_msg)
            elif "wind-angle-relative" in control_system.airmar_data:
                # print(control_system.airmar_data["wind-angle-relative"])
                try:
                    control_system.find_trim_tab_state(control_system.airmar_data["apparentWind"]["direction"])
                except Exception as e:
                    control_system.get_logger().error(str(e))
            else:
                print("No wind angle values")
            if float(control_system.serial_rc["state1"]) < 800:
                # Manual ballast from the RC stick; thresholds pick one of
                # three fixed positions.
                ballast_angle = 0
                if control_system.serial_rc["ballast"] > 1200:
                    ballast_angle = 110
                elif control_system.serial_rc["ballast"] < 800:
                    ballast_angle = 80
                ballast_json = {"channel" : "12", "angle" : ballast_angle}
                control_system.pwm_control_publisher_.publish(control_system.make_json_string(ballast_json))
            else:
                control_system.ballast_algorithm()
            # Rudder always follows the RC stick in RC mode.
            rudder_angle = (float(control_system.serial_rc["rudder"]) / 2000 * 90) + 25
            rudder_json = {"channel": "8", "angle": rudder_angle}
            control_system.pwm_control_publisher_.publish(control_system.make_json_string(rudder_json))
        elif float(control_system.serial_rc["state2"]) < 600:
            # Autonomous point-to-point mode between two hard-coded waypoints.
            destinations = [(42.277055,-71.799924),(42.276692,-71.799912)]
            if 'Latitude' in control_system.airmar_data and 'Longitude' in control_system.airmar_data:
                try:
                    if control_system.p2p_alg is None: # Instantiate new
                        control_system.p2p_alg = p2p.P2P((float(control_system.airmar_data['Latitude']), float(control_system.airmar_data['Longitude'])), destinations[0])
                    wind = control_system.update_winds(control_system.airmar_data["apparentWind"]["direction"])
                    action = control_system.p2p_alg.getAction(wind, float(control_system.airmar_data["magnetic-sensor-heading"]), float(control_system.airmar_data["track-degrees-true"]))
                    control_system.get_logger().error(str(control_system.p2p_alg.getdistance()))
                    control_system.get_logger().error(str(action))
                    if action['status'] == 'DONE':
                        # Reached a waypoint: restart toward the other one.
                        if control_system.p2p_alg.dest == destinations[0]:
                            control_system.p2p_alg = p2p.P2P((control_system.airmar_data['Latitude'], control_system.airmar_data['Longitude']), destinations[1])
                        else:
                            control_system.p2p_alg = p2p.P2P((control_system.airmar_data['Latitude'], control_system.airmar_data['Longitude']), destinations[0])
                    else: # We have a non-done action (either trim tab or rudders)
                        if 'tt-state' in action:
                            # NOTE(review): publishes a bare int here while the RC
                            # branch publishes an Int8 message -- confirm intended.
                            control_system.trim_tab_control_publisher_.publish(int(action['tt-state']))
                        elif 'rudder-angle' in action:
                            rudder_json = {"channel": "8", "angle": int(action['rudder-angle'])}
                            control_system.pwm_control_publisher_.publish(control_system.make_json_string(rudder_json))
                    control_system.ballast_algorithm()
                except Exception as e:
                    control_system.get_logger().error(str(e))
            else:
                control_system.get_logger().error("No latitude and longitude data")
    # Destroy the node explicitly
    # (optional - otherwise it will be done automatically
    # when the garbage collector destroys the node object)
    control_system.destroy_node()
    rclpy.shutdown()
# Script entry point.
if __name__ == '__main__':
    main()
| 43.728682 | 186 | 0.60016 | 5,937 | 0.526236 | 0 | 0 | 0 | 0 | 0 | 0 | 2,535 | 0.224694 |
89b8b74f1ee518f5d1b44c2b1c318c3869dd4dbd | 192 | py | Python | tracedump/pwn_wrapper.py | Mic92/tracedumpd | a84eac58106f1f1d7a82f5dee2a327861e763e4e | [
"MIT"
] | 1 | 2021-03-22T18:04:53.000Z | 2021-03-22T18:04:53.000Z | tracedump/pwn_wrapper.py | Mic92/tracedump | a84eac58106f1f1d7a82f5dee2a327861e763e4e | [
"MIT"
] | null | null | null | tracedump/pwn_wrapper.py | Mic92/tracedump | a84eac58106f1f1d7a82f5dee2a327861e763e4e | [
"MIT"
] | null | null | null | import os
# stop pwnlib from doing fancy things
os.environ["PWNLIB_NOTERM"] = "1"
from pwnlib.elf.corefile import Coredump, Mapping # noqa: E402
from pwnlib.elf.elf import ELF # noqa: E402
| 27.428571 | 63 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 79 | 0.411458 |
89b92883d95ad1fae64e88b91d6848733b211d86 | 4,609 | py | Python | postcodeinfo/apps/postcode_api/admin.py | UKHomeOffice/postcodeinfo | f268a6b6d6d7ac55cb2fb307e98275348e30a287 | [
"MIT"
] | null | null | null | postcodeinfo/apps/postcode_api/admin.py | UKHomeOffice/postcodeinfo | f268a6b6d6d7ac55cb2fb307e98275348e30a287 | [
"MIT"
] | null | null | null | postcodeinfo/apps/postcode_api/admin.py | UKHomeOffice/postcodeinfo | f268a6b6d6d7ac55cb2fb307e98275348e30a287 | [
"MIT"
] | 1 | 2021-04-11T09:12:19.000Z | 2021-04-11T09:12:19.000Z | import django
from django.contrib.gis import admin
from .models import Address
# from https://djangosnippets.org/snippets/2593/
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.views.main import ChangeList
from django.core.paginator import InvalidPage, Paginator
from django.db import connection
class LargeTableChangeList(ChangeList):
    """
    Admin change list that avoids a full COUNT(*) on huge tables.

    Overrides ``get_results`` so that the *unfiltered* total object count is
    taken from PostgreSQL's ``pg_class.reltuples`` statistics estimate instead
    of an exact (and slow) ``COUNT(*)``. Only the 'full_result_count'
    calculation differs from the stock ChangeList.
    """
    def __init__(self, *args, **kwargs):
        super(LargeTableChangeList, self).__init__(*args, **kwargs)

        # Django < 1.4 had no ``list_max_show_all`` attribute; emulate it.
        if django.VERSION < (1, 4):
            from django.contrib.admin.views.main import MAX_SHOW_ALL_ALLOWED
            self.list_max_show_all = MAX_SHOW_ALL_ALLOWED

    def get_results(self, request):
        paginator = self.model_admin.get_paginator(request, self.queryset, self.list_per_page)
        # Get the number of objects, with admin filters applied.
        result_count = paginator.count

        # Get the total number of objects, with no admin filters applied.
        # If no filters were given, reuse ``paginator.count`` (already
        # computed and cached); otherwise try the cheap PostgreSQL statistics
        # estimate and fall back to an exact count on any failure.
        if not self.queryset.query.where:
            full_result_count = result_count
        else:
            try:
                cursor = connection.cursor()
                cursor.execute("SELECT reltuples FROM pg_class WHERE relname = %s",
                               [self.root_queryset.query.model._meta.db_table])
                full_result_count = int(cursor.fetchone()[0])
            except Exception:
                # Narrowed from a bare ``except:`` so that system-exiting
                # exceptions (KeyboardInterrupt, SystemExit) still propagate.
                full_result_count = self.root_queryset.count()

        can_show_all = result_count <= self.list_max_show_all
        multi_page = result_count > self.list_per_page

        # Get the list of objects to display on this page.
        if (self.show_all and can_show_all) or not multi_page:
            result_list = self.queryset._clone()
        else:
            try:
                result_list = paginator.page(self.page_num + 1).object_list
            except InvalidPage:
                raise IncorrectLookupParameters

        self.result_count = result_count
        self.full_result_count = full_result_count
        self.result_list = result_list
        self.can_show_all = can_show_all
        self.multi_page = multi_page
        self.paginator = paginator
class LargeTablePaginator(Paginator):
    """
    Paginator that avoids an exact COUNT(*) on huge, unfiltered tables by
    consulting PostgreSQL's ``pg_class.reltuples`` statistics estimate.
    """
    def _get_count(self):
        """
        Return the total number of objects, across all pages.

        Uses the PostgreSQL statistics estimate instead of an exact count
        when the queryset is unfiltered and the estimate is at least 10,000.
        The result is cached on ``self._count``.
        """
        try:
            if self._count is not None:
                return self._count
        except AttributeError:
            # _count has not been computed yet.
            pass
        try:
            estimate = 0
            if not self.object_list.query.where:
                try:
                    cursor = connection.cursor()
                    cursor.execute(
                        "SELECT reltuples "
                        "FROM pg_class WHERE relname = %s",
                        [self.object_list.query.model._meta.db_table])
                    estimate = int(cursor.fetchone()[0])
                except Exception:
                    # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                    # SystemExit are no longer swallowed. Any DB error simply
                    # leaves estimate at 0 and the exact count is used below.
                    pass
            if estimate < 10000:
                # Small (or filtered) table: an exact count is cheap enough.
                self._count = self.object_list.count()
            else:
                self._count = estimate
        except (AttributeError, TypeError):
            # AttributeError if object_list has no count() method.
            # TypeError if object_list.count() requires arguments
            # (i.e. is of type list).
            self._count = len(self.object_list)
        return self._count

    count = property(_get_count)
# /from https://djangosnippets.org/snippets/2593/
class AddressAdmin(admin.OSMGeoAdmin, admin.ModelAdmin):
    # Skip the exact "x results (y total)" count in the change-list header.
    show_full_result_count = False
    list_display = ('uprn', 'building_number', 'building_name',
                    'thoroughfare_name', 'post_town', 'postcode_index')
    ordering = ('postcode_index','uprn')
    # Use the reltuples-estimating paginator to keep page loads fast on the
    # very large Address table.
    paginator = LargeTablePaginator

    def get_changelist(self, request, **kwargs):
        # Use the estimating change list for the same reason as ``paginator``.
        return LargeTableChangeList

    def get_queryset(self, request):
        qs = super(AddressAdmin, self).get_queryset(request)
        return qs
# Register the Address model with the customised admin above.
admin.site.register(Address, AddressAdmin)
| 36.872 | 94 | 0.632024 | 4,153 | 0.901063 | 0 | 0 | 0 | 0 | 0 | 0 | 1,254 | 0.272076 |
89b95121aacfb7a7ea543fd31b9769da68cb3cfc | 1,083 | py | Python | tests/molecular/bonds/bond/with_ids/test_with_ids.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | tests/molecular/bonds/bond/with_ids/test_with_ids.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | tests/molecular/bonds/bond/with_ids/test_with_ids.py | stevenbennett96/stk | 6e5af87625b83e0bfc7243bc42d8c7a860cbeb76 | [
"MIT"
] | null | null | null | from __future__ import annotations
from typing import Callable
import stk
def test_with_ids(
    bond: stk.Bond,
    get_id_map: Callable[[stk.Bond], dict[int, int]],
) -> None:
    """
    Test :meth:`.Bond.with_ids`.

    Parameters:

        bond:
            The bond to test.

        get_id_map:
            Takes a single parameter, `bond`, and returns a valid
            `id_map` parameter for its :meth:`.Bond.with_ids` method.
            This allows the testing of different values of this
            parameter.

    """
    id_map = get_id_map(bond)
    clone = bond.with_ids(id_map)
    assert clone is not bond
    # Each atom id must be remapped when present in id_map, else unchanged.
    atom_pairs = (
        (bond.get_atom1(), clone.get_atom1()),
        (bond.get_atom2(), clone.get_atom2()),
    )
    for original_atom, cloned_atom in atom_pairs:
        old_id = original_atom.get_id()
        assert cloned_atom.get_id() == id_map.get(old_id, old_id)
    # Everything else about the bond is preserved.
    assert clone.get_periodicity() == bond.get_periodicity()
    assert clone.get_order() == bond.get_order()
89b954dd1798885bb3507d31ac4569dbd6df2065 | 4,131 | py | Python | app/services.py | fabl1106/emoji_rank | f3fa50b00d34092fe6b7317a1e85829c8647b574 | [
"MIT"
] | 10 | 2021-04-15T14:17:54.000Z | 2022-02-09T07:53:27.000Z | app/services.py | fabl1106/emoji_rank | f3fa50b00d34092fe6b7317a1e85829c8647b574 | [
"MIT"
] | null | null | null | app/services.py | fabl1106/emoji_rank | f3fa50b00d34092fe6b7317a1e85829c8647b574 | [
"MIT"
] | 4 | 2021-08-12T09:09:14.000Z | 2022-02-03T08:18:48.000Z | from dataclasses import dataclass
from app import crud
from app.schemas import UserCreate, SlackEventHook
from app.settings import REACTION_LIST, DAY_MAX_REACTION
# about reaction
REMOVED_REACTION = 'reaction_removed'
ADDED_REACTION = 'reaction_added'
APP_MENTION_REACTION = 'app_mention'
# about command
CREATE_USER_COMMAND = 'create_user'
@dataclass
class EventDto:
    """Typed view over the ``event`` payload of a Slack event hook."""
    type: str        # ex: reaction_added
    user: str        # slack_id of the member who reacted
    item: dict       # type, channel, ts
    reaction: str    # the emoji name
    item_user: str   # slack_id of the member who received the reaction
    event_ts: str
    text: str        # app-mention text

    def __init__(self, event_data):
        # Missing keys become None, matching dict.get semantics.
        for field_name in ('type', 'user', 'item', 'reaction',
                           'item_user', 'event_ts', 'text'):
            setattr(self, field_name, event_data.get(field_name))
@dataclass
class AddUserCommandDto:
    """Parsed ``create_user`` command arguments.

    Each raw token arrives as ``key=value`` (e.g. ``name=JAY``); the
    constructor removes the key prefix and keeps only the value.
    """
    name: str
    slack_id: str
    avatar_url: str

    def __init__(self, name: str, slack_id: str, avatar_url: str):
        # Bug fix: the original used str.strip('name='), which removes any of
        # the characters {n, a, m, e, =} from BOTH ends of the string
        # (e.g. 'name=Emma' -> 'Emm'). Remove only the literal leading
        # prefix instead.
        self.name = self._strip_prefix(name, 'name=')
        self.slack_id = self._strip_prefix(slack_id, 'slack_id=')
        self.avatar_url = self._strip_prefix(avatar_url, 'avatar_url=')

    @staticmethod
    def _strip_prefix(value: str, prefix: str) -> str:
        """Return *value* without a leading *prefix* (unchanged if absent)."""
        return value[len(prefix):] if value.startswith(prefix) else value
class SlackService(object):
    """Routes Slack event-hook payloads to the matching handler."""

    def check_challenge(self, event: SlackEventHook, db) -> dict:
        """Answer Slack's URL-verification challenge, otherwise dispatch."""
        # Slack "Enable Events" URL verification handshake.
        if 'challenge' in event:
            return {"challenge": event['challenge']}

        if "event" in event:
            event_dto = EventDto(event['event'])
            if event_dto.type in (ADDED_REACTION, REMOVED_REACTION):
                # An emoji may only be given to somebody else, never yourself.
                if event_dto.item_user != event_dto.user:
                    self.assign_emoji(event_dto, db)
            elif event_dto.type == APP_MENTION_REACTION:
                self.manage_app_mention(event_dto, db)
        return {}

    def assign_emoji(self, event: EventDto, db):
        """Apply an added/removed reaction to the giver's daily budget and
        the receiver's tally."""
        if event.reaction not in REACTION_LIST:
            return

        if event.type == ADDED_REACTION:
            giver = crud.get_user(db, event.user)
            # The giver must still have reactions left to hand out today.
            if giver.my_reaction > 0:
                crud.update_my_reaction(db, giver, False)
                crud.update_added_reaction(db=db, type=event.reaction, item_user=event.item_user,
                                           user=event.user, is_increase=True)
        elif event.type == REMOVED_REACTION:
            giver = crud.get_user(db, event.user)
            # Removing a given reaction refunds it, capped at the daily max.
            if giver.my_reaction < DAY_MAX_REACTION:
                crud.update_my_reaction(db, giver, True)
                crud.update_added_reaction(db=db, type=event.reaction, item_user=event.item_user,
                                           user=event.user, is_increase=False)

    def manage_app_mention(self, event: EventDto, db):
        """
        Dispatch an app-mention command.
        ex: <@ABCDEFG> --create_user --name=JAY --slack_id=ABCDEFG --avatar_url=https://blablac.com/abcd
        """
        tokens = event.text.split('--')
        tokens.pop(0)  # the first token is the mention itself (user slack_id)
        if not tokens:
            return

        command = tokens.pop(0).strip(' ')
        if command == CREATE_USER_COMMAND and len(tokens) == 3:
            dto = AddUserCommandDto(tokens[0], tokens[1], tokens[2])
            self.add_user(dto, db)

    def add_user(self, add_user_cmd_dto: AddUserCommandDto, db):
        """Create the user unless the slack_id is already registered."""
        if crud.get_user(db, item_user=add_user_cmd_dto.slack_id):
            return
        new_user = UserCreate(username=add_user_cmd_dto.name, slack_id=add_user_cmd_dto.slack_id,
                              using_emoji_count=DAY_MAX_REACTION, get_emoji_count=0,
                              avatar_url=add_user_cmd_dto.avatar_url)
        crud.create_user(db=db, user=new_user)
| 33.860656 | 106 | 0.616074 | 3,967 | 0.913424 | 0 | 0 | 965 | 0.222197 | 0 | 0 | 954 | 0.219664 |
89baa3cabe37903c188fd9cb2212f197553a6887 | 2,010 | py | Python | util/build_tests.py | skruger/AVWX-Engine | c3c0b90a3aa57d43bea47d07b977d71b90f987b9 | [
"MIT"
] | null | null | null | util/build_tests.py | skruger/AVWX-Engine | c3c0b90a3aa57d43bea47d07b977d71b90f987b9 | [
"MIT"
] | 3 | 2019-11-21T17:59:14.000Z | 2019-12-04T03:45:05.000Z | util/build_tests.py | AirbusDriver/avwx-engine | e3fed8f744a48faca58c3e94ddbf214f9c719d3d | [
"MIT"
] | null | null | null | """
Creates files for end-to-end tests
python util/build_tests.py
"""
# stdlib
import json
from dataclasses import asdict
# module
import avwx
def make_metar_test(station: str) -> dict:
    """
    Build the METAR end-to-end test payload for *station*.
    """
    metar = avwx.Metar(station)
    metar.update()
    # Clear timestamp due to parse_date limitations
    metar.data.time = None
    payload = {
        "data": asdict(metar.data),
        "translations": asdict(metar.translations),
        "summary": metar.summary,
        "speech": metar.speech,
        "station_info": asdict(metar.station_info),
    }
    return payload
def make_taf_test(station: str, report: str = None) -> dict:
    """
    Build the TAF end-to-end test payload for *station*.

    *report* optionally supplies raw TAF text instead of fetching it.
    """
    taf = avwx.Taf(station)
    taf.update(report)
    data = asdict(taf.data)
    # Clear timestamps due to parse_date limitations
    data["time"] = None
    data["start_time"] = None
    data["end_time"] = None
    for period in data["forecast"]:
        period["start_time"] = None
        period["end_time"] = None
    return {
        "data": data,
        "translations": asdict(taf.translations),
        "summary": taf.summary,
        "speech": taf.speech,
        "station_info": asdict(taf.station_info),
    }
def make_pirep_test(station: str) -> dict:
    """
    Build the PIREP end-to-end test payload for *station*.

    Returns None when the station has no reports. (The original annotation
    ``-> [dict]`` was an invalid list-literal hint; the function has always
    returned a dict or None, never a list.)
    """
    pireps = avwx.Pireps(station)
    pireps.update()
    if not pireps.data:
        return None
    reports = []
    for report in pireps.data:
        # Clear timestamp due to parse_date limitations
        report.time = None
        reports.append({"data": asdict(report)})
    return {"reports": reports, "station_info": asdict(pireps.station_info)}
if __name__ == "__main__":
    from pathlib import Path

    # Regenerate the JSON fixture for every report type / station pair.
    for target in ("metar", "taf", "pirep"):
        for station in ("KJFK", "KMCO", "PHNL", "EGLL"):
            # Look up make_metar_test / make_taf_test / make_pirep_test by name.
            data = locals()[f"make_{target}_test"](station)
            if data:
                path = Path("tests", target, station + ".json")
                json.dump(data, path.open("w"), indent=4, sort_keys=True)
| 25.443038 | 73 | 0.58607 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 667 | 0.331841 |
89baa670798425bdbbb997843280bd98d927b769 | 42 | py | Python | pacote-download/ex(1-100)/ex112/utilidadescev/__init__.py | gssouza2051/python-exercicios | 81e87fed7ead0adf58473a741aaa3c83064f6cb5 | [
"MIT"
] | null | null | null | pacote-download/ex(1-100)/ex112/utilidadescev/__init__.py | gssouza2051/python-exercicios | 81e87fed7ead0adf58473a741aaa3c83064f6cb5 | [
"MIT"
] | null | null | null | pacote-download/ex(1-100)/ex112/utilidadescev/__init__.py | gssouza2051/python-exercicios | 81e87fed7ead0adf58473a741aaa3c83064f6cb5 | [
"MIT"
] | null | null | null | from ex111.utilidadescev import moeda,dado | 42 | 42 | 0.880952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
89bb8aec70dc5ddd26138767ff03178f1d8f86d1 | 830 | py | Python | sdk/pyseele/clazz.py | rinkako/SeeleFlow | 4b8858f367ef02a568c03c8114e1c1b221208feb | [
"MIT"
] | 3 | 2019-12-20T13:32:34.000Z | 2021-04-27T06:02:28.000Z | sdk/pyseele/clazz.py | rinkako/SeeleFlow | 4b8858f367ef02a568c03c8114e1c1b221208feb | [
"MIT"
] | null | null | null | sdk/pyseele/clazz.py | rinkako/SeeleFlow | 4b8858f367ef02a568c03c8114e1c1b221208feb | [
"MIT"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
Project Seele
@author : Rinka
@date : 2019/12/17
"""
from pyseele.workitem import Workitem
class ResourcingContext:
    """
    Resourcing Context maintains all org.rinka.seele.server.resource service principals that guide RS
    to handle the workitem.
    """
    def __init__(self):
        # resourcing unique id
        self.rsid: str = None
        # process runtime unique id
        self.rtid: str = None
        # priority, bigger schedule faster
        self.priority: int = 0
        # execution cost in ms
        self.execution_time_span: int = 0
        # TODO: service type, indicate the action RS should do
        self.rs_type = None
        # TODO: resourcing context properties (shape not yet defined)
        self.rs_properties: dict = None
        # workitem bound to this resourcing request
        self.workitem: Workitem = None
| 23.055556 | 101 | 0.624096 | 706 | 0.850602 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.512048 |
89bec5ceb25cd9718ad4b6c93d9467f0e3ef9ba6 | 34,847 | py | Python | scripts/python/turtleRelated/circleint.py | jeremiahmarks/dangerzone | fe2946b8463ed018d2136ca0eb178161ad370565 | [
"MIT"
] | 1 | 2015-08-15T05:25:35.000Z | 2015-08-15T05:25:35.000Z | scripts/python/turtleRelated/circleint.py | jeremiahmarks/dangerzone | fe2946b8463ed018d2136ca0eb178161ad370565 | [
"MIT"
] | null | null | null | scripts/python/turtleRelated/circleint.py | jeremiahmarks/dangerzone | fe2946b8463ed018d2136ca0eb178161ad370565 | [
"MIT"
] | null | null | null | import math
import fvh2, fvh
import supercircle
masterCircleSet=set()
circlecalled = 0
checkcirclescalled = 0
MINOFFSET=5
class Circle():
    """A circle drawn with a turtle pen.

    Construction registers a quantised (x, y, r) signature in the global
    ``masterCircleSet`` so near-duplicate circles can be detected, and bumps
    the global ``circlecalled`` instantiation counter.
    """
    def __init__(self,x,y,r,lm=None, keep=True):
        global circlecalled
        circlecalled+=1
        self.keep = keep
        self.center=(x,y)
        self.radius=r
        # Quantise the centre to a MINOFFSET grid (Python-2 integer division)
        # so circles closer than MINOFFSET count as duplicates.
        self.checkString=(int(x)/MINOFFSET*MINOFFSET,int(y)/MINOFFSET*MINOFFSET,r)
        masterCircleSet.add(self.checkString)
        self.color="black"
        if not lm:
            # No shared turtle supplied: create a private one, animation off.
            self.lm=fvh2.fvh.MyTurtle()
            self.lm.tracer(False)
        else:
            self.lm=lm
        #self.draw()
    def draw(self):
        """Draw the circle in the current colour; undo the stroke if not kept."""
        #self.lm=fvh2.fvh.MyTurtle()
        self.lm.pencolor(self.color)
        self.lm.setup()
        self.lm.penup()
        fvh2.circlearound(self.center, self.radius,self.lm)
        if not self.keep:
            # Two undos per circle -- presumably circlearound leaves two
            # undoable actions; TODO confirm against fvh2.circlearound.
            self.lm.undo()
            self.lm.undo()
    def drawred(self):
        # Convenience: draw once in red without changing self.color.
        self.lm.pencolor('red')
        self.lm.penup()
        fvh2.circlearound(self.center, self.radius,self.lm)
    def drawwhite(self):
        # Convenience: draw in white (erases on a white background).
        self.lm.pencolor('white')
        self.lm.penup()
        fvh2.circlearound(self.center, self.radius,self.lm)
    def setcolor(self, color):
        # Colour used by subsequent draw() calls.
        self.color=color
    def realCards(self):
        """Create four half-radius circles on the rim, 90 degrees apart,
        headings measured relative to the direction toward the origin."""
        self.realcards=[]
        self.lm.pu()
        for x in range(4):
            self.lm.goto(self.center)
            self.lm.seth(self.lm.towards(0,0)+90*x)
            self.lm.fd(self.radius)
            self.realcards.append(Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2))
    def extendedCards(self, numberOfexteriorCircles):
        """Recursively ring the rim (facing away from the origin) with
        half-radius circles until the radius drops below 4."""
        self.cardinals=[]
        angle=360.0/numberOfexteriorCircles
        for x in range(numberOfexteriorCircles):
            self.lm.pu()
            self.lm.goto(self.center)
            self.lm.seth(self.lm.towards(0,0)+180+x*angle)
            self.lm.fd(self.radius)
            a=Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep)
            self.cardinals.append(a)
            if (self.radius/2>=4):
                a.extendedCards(numberOfexteriorCircles)
                for card in a.cardinals:
                    self.cardinals.append(card)
    def innerextendedCards(self, numberOfexteriorCircles):
        """Same as extendedCards but headings face toward the origin."""
        self.cardinals=[]
        angle=360.0/numberOfexteriorCircles
        for x in range(numberOfexteriorCircles):
            self.lm.pu()
            self.lm.goto(self.center)
            self.lm.seth(self.lm.towards(0,0)+x*angle)
            self.lm.fd(self.radius)
            a=Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep)
            self.cardinals.append(a)
            if (self.radius/2>=4):
                a.innerextendedCards(numberOfexteriorCircles)
                for card in a.cardinals:
                    self.cardinals.append(card)
    def differentcards(self, numberOfexteriorCircles):
        """Non-recursive variant of extendedCards: a single ring of circles."""
        self.cardinals=[]
        angle=360.0/numberOfexteriorCircles
        for x in range(numberOfexteriorCircles):
            self.lm.pu()
            self.lm.goto(self.center)
            self.lm.seth(self.lm.towards(0,0)+180+x*angle)
            self.lm.fd(self.radius)
            self.cardinals.append(Circle(self.lm.xcor(), self.lm.ycor(), self.radius/2, self.lm, self.keep))
    def addCardinals(self):
        """Create the four axis-aligned half-radius circles (E, W, N, S)."""
        self.cardinals=[]
        self.cardinals.append(Circle(self.center[0]+self.radius, self.center[1], self.radius/2))
        self.cardinals.append(Circle(self.center[0]-self.radius, self.center[1], self.radius/2))
        self.cardinals.append(Circle(self.center[0], self.center[1]+self.radius, self.radius/2))
        self.cardinals.append(Circle(self.center[0], self.center[1]-self.radius, self.radius/2))
        #for eachcircle in self.cardinals:
        # eachcircle.draw()
    def comparetoCardinals(self):
        """Spawn same-radius circles at every intersection point of this
        circle with each cardinal (assumes every pair actually intersects)."""
        self.primarytocardinals=[]
        for eachcircle in self.cardinals:
            intersectionpoints=circleinter(self.center, self.radius, eachcircle.center, eachcircle.radius)
            self.primarytocardinals.append(Circle(intersectionpoints[0][0], intersectionpoints[0][1], self.radius))
            self.primarytocardinals.append(Circle(intersectionpoints[1][0], intersectionpoints[1][1], self.radius))
def checkCircles(circle1, circle2):
    """Return the two intersection points of two Circle objects, each
    coordinate rounded to 2 decimal places, or None when they do not
    intersect. Also bumps the global call counter."""
    global checkcirclescalled
    checkcirclescalled += 1
    points = circleinter(circle1.center, circle1.radius, circle2.center, circle2.radius)
    if not points:
        return points
    rounded = tuple(
        (float("%.2f" % px), float("%.2f" % py)) for (px, py) in points
    )
    return rounded
def circleinter(center0, r0, center1, r1):
    """
    Return the two intersection points of two circles, or ``None``.

    Each circle is given as its centre ``(x, y)`` tuple and radius, so the
    call signature is unchanged from the original version, which used
    tuple-unpacking parameters -- syntax removed in Python 3 (PEP 3113).
    This version runs on both Python 2 and 3.

    Returns ``((xi, yi), (xi2, yi2))``; the two points coincide when the
    circles are tangent. Returns ``None`` when the circles are separate,
    one is contained in the other, or they are concentric.
    """
    x0, y0 = center0
    x1, y1 = center1
    dx = float(x1 - x0)
    dy = float(y1 - y0)
    d = (dx ** 2 + dy ** 2) ** 0.5  # distance between centres
    if d > (r0 + r1):
        return None  # too far apart
    if d < math.fabs(r0 - r1):
        return None  # one circle inside the other
    if d == 0:
        return None  # concentric: zero or infinitely many solutions
    # Distance from centre 0 to the chord joining the intersection points.
    a = ((r0 * r0) - (r1 * r1) + (d * d)) / (2.0 * d)
    x2 = x0 + (dx * a / d)
    y2 = y0 + (dy * a / d)
    # Half-length of the chord.
    h = ((r0 * r0) - (a * a)) ** 0.5
    rx = -dy * (h / d)
    ry = dx * (h / d)
    return (x2 + rx, y2 + ry), (x2 - rx, y2 - ry)
def differentCircles(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
    """Draw a primary circle ringed by secondary circles, then repeatedly
    spawn half-radius circles at every pairwise intersection until no new
    circle (radius >= 10, centre not already seen) can be added; save the
    result as an EPS file whose name encodes the four parameters."""
    # Build a filename like circles/primaryCircleRadius200...theta15.eps.
    filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
    filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
    filenameZip=zip(filenameStrings,filenameValues)
    filename=''
    for values in filenameZip:
        filename=filename+values[0]+str(values[1])
    filename='circles/'+filename+'.eps'
    if not lm:
        lm=fvh2.fvh.MyTurtle()
    lm.setup()
    lm.tracer(False)
    ts=lm.getscreen()
    circlelist=[]
    newlist=[]
    # Seed: one primary circle at the origin...
    primaryCircle=Circle(0,0,primaryCircleRadius,lm)
    primaryCircle.draw()
    circlelist.append(primaryCircle)
    # ...and numberOfSecondaryCircles circles spaced theta degrees apart
    # on the primary circle's rim.
    for circle in range(numberOfSecondaryCircles):
        lm.pu()
        lm.goto(primaryCircle.center)
        lm.seth(circle*secondaryCircleTheta)
        lm.fd(primaryCircleRadius)
        temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
        temp.draw()
        circlelist.append(temp)
    totalbefore=len(circlelist)
    totalafter=0
    counter=0
    # Fix-point loop: stop once an iteration adds no new circles.
    while(totalbefore!=totalafter):
        totalbefore=len(circlelist)
        for firstCircleplace in range(len(circlelist)):
            firstCircle=circlelist[firstCircleplace]
            # Start at firstCircleplace so each unordered pair is tested once.
            for secondCircleplace in range(firstCircleplace,len(circlelist)):
                secondCircle=circlelist[secondCircleplace]
                thisRadius=min(firstCircle.radius, secondCircle.radius)/2
                if (thisRadius<10):
                    continue
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    # Skip intersections whose quantised signature was seen.
                    if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
                        temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
                        temp.draw()
                        newlist.append(temp)
                    if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
                        temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
                        temp.draw()
                        newlist.append(temp)
        ts.update()
        counter=len(circlelist)
        # Promote this generation's circles into the working list.
        for item in newlist:
            item.draw()
            circlelist.append(item)
        ts.update()
        newlist=[]
        totalafter=len(circlelist)
    fvh2.savetocircles(lm,filename)
def differentCirclesforViewing(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
    """
    Coloured variant of differentCircles that resets the global dedup set and
    saves a GIF-friendly frame per call. Designed with something like the
    following in mind:
        lm=circleint.fvh2.fvh.MyTurtle()
        for a in range(2,100):
            for b in range(3600):
                circleint.differentCirclesforViewing(200,15,a,b/10.0,lm)
                lm.clear()
    and then make a gif of the results
    """
    # Fresh dedup set per frame so successive frames start from scratch.
    global masterCircleSet
    masterCircleSet=set()
    # Zero-padded values keep frame filenames in lexicographic order.
    filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
    filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
    filenameZip=zip(filenameStrings,filenameValues)
    filename=''
    for values in filenameZip:
        filename=filename+values[0]+'%03d' % values[1]
    filename='circles/testa/'+filename+'.eps'
    if not lm:
        lm=fvh2.fvh.MyTurtle()
    lm.setup()
    lm.tracer(False)
    ts=lm.getscreen()
    circlelist=[]
    newlist=[]
    primaryCircle=Circle(0,0,primaryCircleRadius,lm)
    primaryCircle.draw()
    circlelist.append(primaryCircle)
    # Cycle through the fvh palette; one colour per circle created.
    colorcounter=0
    for circle in range(numberOfSecondaryCircles):
        lm.pu()
        lm.goto(primaryCircle.center)
        lm.seth((secondaryCircleTheta+(circle*secondaryCircleTheta))%360)
        lm.fd(primaryCircleRadius)
        temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
        temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
        colorcounter+=1
        temp.draw()
        circlelist.append(temp)
    totalbefore=len(circlelist)
    totalafter=0
    counter=0
    # Fix-point loop: stop once an iteration adds no new circles. Note both
    # inner ranges start at 0 here, so each pair is tested in both orders.
    while(totalbefore!=totalafter):
        totalbefore=len(circlelist)
        for firstCircleplace in range(len(circlelist)):
            firstCircle=circlelist[firstCircleplace]
            for secondCircleplace in range(len(circlelist)):
                secondCircle=circlelist[secondCircleplace]
                thisRadius=min(firstCircle.radius, secondCircle.radius)/2
                if (thisRadius<10):
                    continue
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
                        temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
                        temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
                        colorcounter+=1
                        temp.draw()
                        newlist.append(temp)
                    if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
                        temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
                        temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
                        colorcounter+=1
                        temp.draw()
                        newlist.append(temp)
        ts.update()
        #masterCircleSet=set()
        counter=len(circlelist)
        for item in newlist:
            #item.draw()
            circlelist.append(item)
        ts.update()
        newlist=[]
        totalafter=len(circlelist)
    #fvh2.savetocircles(lm,filename,aheight=(primaryCircleRadius+secondaryCircleRadius),awidth=(primaryCircleRadius+secondaryCircleRadius),ax=-(primaryCircleRadius+secondaryCircleRadius)/2.0, ay=-(primaryCircleRadius+secondaryCircleRadius)/2.0 )
    fvh2.savetocircles(lm,filename,togif=True)#,aheight=(primaryCircleRadius+secondaryCircleRadius),awidth=(primaryCircleRadius+secondaryCircleRadius))#,ax=-(primaryCircleRadius+secondaryCircleRadius)/2.0, ay=-(primaryCircleRadius+secondaryCircleRadius)/2.0 )
def differentCirclesforAnimation(primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta,lm=None):
    """
    Animation variant: like differentCirclesforViewing but the primary circle
    outline is not drawn, the global dedup set persists across calls, and the
    frame is not saved (savetocircles is commented out). Designed with
    something like the following in mind:
        lm=circleint.fvh2.fvh.MyTurtle()
        for a in range(2,100):
            for b in range(3600):
                circleint.differentCirclesforAnimation(200,15,a,b/10.0,lm)
                lm.clear()
    and then make a gif of the results
    """
    # Filename is built but currently unused (see commented save at the end).
    filenameStrings=['primaryCircleRadius','secondaryCircleRadius','numberOfSecondaryCircles','secondaryCircleTheta']
    filenameValues=[primaryCircleRadius, secondaryCircleRadius, numberOfSecondaryCircles, secondaryCircleTheta]
    filenameZip=zip(filenameStrings,filenameValues)
    filename=''
    for values in filenameZip:
        filename=filename+values[0]+str(values[1])
    filename='circles/neatani/'+filename+'.eps'
    if not lm:
        lm=fvh2.fvh.MyTurtle()
    lm.setup()
    lm.tracer(False)
    ts=lm.getscreen()
    circlelist=[]
    newlist=[]
    # Primary circle participates in intersections but is not drawn.
    primaryCircle=Circle(0,0,primaryCircleRadius,lm)
    #primaryCircle.draw()
    circlelist.append(primaryCircle)
    colorcounter=0
    for circle in range(numberOfSecondaryCircles):
        lm.pu()
        lm.goto(primaryCircle.center)
        lm.seth((secondaryCircleTheta+(circle*secondaryCircleTheta))%360)
        lm.fd(primaryCircleRadius)
        temp=Circle(lm.xcor(), lm.ycor(), secondaryCircleRadius, lm)
        temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
        colorcounter+=1
        temp.draw()
        circlelist.append(temp)
    totalbefore=len(circlelist)
    totalafter=0
    counter=0
    # Fix-point loop: stop once an iteration adds no new circles.
    while(totalbefore!=totalafter):
        totalbefore=len(circlelist)
        for firstCircleplace in range(len(circlelist)):
            firstCircle=circlelist[firstCircleplace]
            # Start at firstCircleplace so each unordered pair is tested once.
            for secondCircleplace in range(firstCircleplace,len(circlelist)):
                secondCircle=circlelist[secondCircleplace]
                thisRadius=min(firstCircle.radius, secondCircle.radius)/2
                if (thisRadius<10):
                    continue
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
                        temp=Circle(newCircles[0][0], newCircles[0][1], thisRadius,lm)
                        temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
                        colorcounter+=1
                        temp.draw()
                        newlist.append(temp)
                    if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,thisRadius) not in masterCircleSet):
                        temp=Circle(newCircles[1][0], newCircles[1][1], thisRadius,lm)
                        temp.setcolor(fvh.allcolors[colorcounter%len(fvh.allcolors)])
                        colorcounter+=1
                        temp.draw()
                        newlist.append(temp)
        ts.update()
        counter=len(circlelist)
        for item in newlist:
            #item.draw()
            circlelist.append(item)
        ts.update()
        newlist=[]
        totalafter=len(circlelist)
    #fvh2.savetocircles(lm,filename)
def createDrawing(bigdiameter,diameter):
    """Seed one big circle and one circle on its rim, then keep spawning
    fixed-radius circles at intersections until the global masterCircleSet
    stops growing or reaches 750 entries; save the drawing.

    Python 2 only (print statements). Termination is tracked via
    len(masterCircleSet), not len(circlelist).
    """
    lm=fvh2.fvh.MyTurtle()
    lm.setup()
    lm.tracer(False)
    a=Circle(0,0,bigdiameter,lm)
    b=Circle(bigdiameter,0,diameter,lm)
    circlelist=[a,b]
    totalbefore=len(masterCircleSet)
    totalafter=0
    newlist=[]
    counter=0
    #print totalbefore
    while((totalbefore!=totalafter) and (len(masterCircleSet)<750)):
        #print (circlecalled, checkcirclescalled)
        #print totalbefore, totalafter
        #raw_input()
        print len(masterCircleSet)
        totalbefore=len(masterCircleSet)
        # Only compare circles added since the previous pass (counter) with
        # the whole list; every spawned circle keeps the same 'diameter'.
        for firstCircleplace in range(counter,len(circlelist)):
            firstCircle=circlelist[firstCircleplace]
            for secondCircleplace in range(len(circlelist)):
                secondCircle=circlelist[secondCircleplace]
                newCircles=checkCircles(firstCircle, secondCircle)
                #print newCircles, len(newlist)
                #raw_input((totalbefore,totalafter))
                if newCircles:
                    if ((int(newCircles[0][0])/MINOFFSET*MINOFFSET,int(newCircles[0][1])/MINOFFSET*MINOFFSET,diameter) not in masterCircleSet):
                        newlist.append(Circle(newCircles[0][0], newCircles[0][1], diameter,lm))
                    else:
                        print newCircles[0]
                    if ((int(newCircles[1][0])/MINOFFSET*MINOFFSET,int(newCircles[1][1])/MINOFFSET*MINOFFSET,diameter) not in masterCircleSet):
                        newlist.append(Circle(newCircles[1][0], newCircles[1][1], diameter,lm))
                    else:
                        print newCircles[1]
        counter=len(circlelist)
        for item in newlist:
            item.draw()
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
    lm.tracer(True)
    a.lm.tracer(True)
    fvh2.savetocircles(a.lm)
def createanotherdrawing(startSize):
    """Grow a packing from one circle plus its cardinal neighbours.

    Each discovered circle's size is half the smaller radius of the pair that
    produced it; generation stops once masterCircleSet stops growing.  Result
    is written via fvh2.savetocircles().

    NOTE(review): new circles are appended to circlelist while the outer
    ``for`` loops iterate over it, so each pass also visits circles found
    earlier in the same pass -- intentional-looking but fragile; preserved.
    ``newlist`` is never filled here (dead code kept for symmetry with the
    sibling functions).
    """
    a=Circle(0,0,startSize)
    smallestsize=startSize
    a.addCardinals()
    # Undo the turtle strokes the constructor/addCardinals just drew.
    a.lm.undo()
    a.lm.undo()
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        circlelist.append(eachitem)
        eachitem.lm.undo()
        eachitem.lm.undo()
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            for secondCircle in circlelist:
                # Child circles shrink by half each generation; stop at radius 1.
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/2
                if (thisDiameter<=1):
                    #print "first break"
                    break
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter)
                            newCircle.draw()
                            circlelist.append(newCircle)
                            #for eachCard in newCircle.cardinals:
                                #circlelist.append(eachCard)
            #if (thisDiameter<=1):
                #print "second break"
        for item in newlist:
            circlelist.append(item)
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    fvh2.savetocircles(a.lm)
def yetanotherdrawing(startdiameter,numberofoutsidecircles):
    """Packing seeded by one circle and ``numberofoutsidecircles`` satellites.

    Uses Circle.differentcards() for the satellites, generates children at
    half the smaller radius of each pair, and only draws everything at the
    very end (tracer disabled while computing).  Saves via
    fvh2.savetocircles().

    NOTE(review): circlelist is appended to while being iterated (same caveat
    as createanotherdrawing); ``newlist`` stays empty here.
    """
    lm=fvh2.fvh.MyTurtle()
    lm.setup()
    lm.tracer(False)
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter,lm)
    a.lm.undo()
    a.lm.undo()
    a.differentcards(numberofoutsidecircles)
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        eachitem.lm.undo()
        eachitem.lm.undo()
        circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            print "new firstCircle : " + str(firstCircle.checkString)
            print "Current number of circles in circlelist: "+str(len(circlelist))
            #firstCircle.drawred()
            for secondCircle in circlelist:
                #secondCircle.drawred()
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
                if (thisDiameter<=1):
                    #print "first break"
                    #secondCircle.draw()
                    break
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        # Dedupe on the MINOFFSET-quantized (x, y, size) key.
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter,lm)
                            #newCircle.realCards()
                            circlelist.append(newCircle)
                            #for eachCard in newCircle.realcards:
                            #    circlelist.append(eachCard)
                #secondCircle.draw()
            #if (thisDiameter<=1):
                #print "second break"
            #firstCircle.draw()
        for item in newlist:
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    # Deferred rendering: draw the finished packing in one go.
    for acircle in circlelist:
        acircle.draw()
    lm.tracer(True)
    fvh2.savetocircles(a.lm)
def yetanotherdrawingagain(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
    """Two-level seeded packing; optionally re-seed satellites recursively.

    Resets the global masterCircleSet, seeds the root circle plus two
    generations of differentcards() satellites, then grows the packing until
    no new circle appears.  With ``recursive`` True, every discovered circle
    also contributes its own satellites.  Accepts an existing turtle ``lm``
    (used by makeart1) or creates one.
    """
    global masterCircleSet
    masterCircleSet=set()
    if not lm:
        lm=fvh2.fvh.MyTurtle()
        lm.setup()
    lm.tracer(False)
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter,lm)
#    a.lm.undo()
#    a.lm.undo()
    a.differentcards(numberofoutsidecircles)
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        #eachitem.lm.undo()
        #eachitem.lm.undo()
        eachitem.differentcards(numberofoutsidecircles)
        for subitem in eachitem.cardinals:
            #subitem.lm.undo()
            #subitem.lm.undo()
            circlelist.append(subitem)
        circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        #print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            #print "new firstCircle : " + str(firstCircle.checkString)
            #print "Current number of circles in circlelist: "+str(len(circlelist))
            #firstCircle.drawred()
            for secondCircle in circlelist:
                #secondCircle.drawred()
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
                # Bail out of this row once the pair is already tiny.
                if (min(firstCircle.radius, secondCircle.radius)<=1):
                    #print "first break"
                    #secondCircle.draw()
                    break
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    #print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter,lm)
                            newlist.append(newCircle)
                            if recursive:
                                newCircle.differentcards(numberofoutsidecircles)
                                for eachCard in newCircle.cardinals:
                                    circlelist.append(eachCard)
                #secondCircle.draw()
            #if (thisDiameter<=1):
                #print "second break"
            #firstCircle.draw()
        # Render and absorb this pass's discoveries.
        for item in newlist:
            item.draw()
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    lm.tracer(True)
    fvh2.savetocircles(a.lm)
def yetanotherdrawingagainwithmax(startdiameter,numberofoutsidecircles, recursive=False, lm=None,stepsize=2):
    """Variant of yetanotherdrawingagain() with a configurable shrink factor.

    Children are sized min(radius)/stepsize instead of /2, the root Circle is
    built with an extra ``False`` constructor flag, and circles are drawn as
    soon as they are discovered instead of at the end of each pass.  Used by
    makeart2().
    """
    global masterCircleSet
    masterCircleSet=set()
    if not lm:
        lm=fvh2.fvh.MyTurtle()
        lm.setup()
    lm.tracer(False)
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter,lm,False)
#    a.lm.undo()
#    a.lm.undo()
    a.differentcards(numberofoutsidecircles)
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        #eachitem.lm.undo()
        #eachitem.lm.undo()
        eachitem.differentcards(numberofoutsidecircles)
        for subitem in eachitem.cardinals:
            #subitem.lm.undo()
            #subitem.lm.undo()
            circlelist.append(subitem)
        circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
#        print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            #print "new firstCircle : " + str(firstCircle.checkString)
            #print "Current number of circles in circlelist: "+str(len(circlelist))
            #firstCircle.drawred()
            for secondCircle in circlelist:
                #firstCircle.drawred()
                #secondCircle.drawred()
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/float(stepsize)
                if (min(firstCircle.radius, secondCircle.radius)<=1):
                    #print "first break"
                    #secondCircle.draw()
                    break
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    #print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter,lm)
                            # Draw immediately rather than batching per pass.
                            newCircle.draw()
                            circlelist.append(newCircle)
                            if recursive:
                                newCircle.differentcards(numberofoutsidecircles)
                                for eachCard in newCircle.cardinals:
                                    eachCard.draw()
                                    circlelist.append(eachCard)
                #secondCircle.draw()
                #firstCircle.draw()
            #if (thisDiameter<=1):
                #print "second break"
            #firstCircle.draw()
        # newlist is never filled in this variant; loop kept from the template.
        for item in newlist:
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    lm.tracer(True)
    fvh2.savetocircles(a.lm)
def yadwm(startdiameter):
    """Packing variant sized by the larger radius of each pair ('with max').

    Children are max(radius)/2 and generation stops once that size drops to
    32; every new circle also gets addCardinals() and all of its cardinals are
    drawn and enqueued.  Saves via fvh2.savetocircles().
    """
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter)
    a.addCardinals()
    a.lm.undo()
    a.lm.undo()
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        eachitem.lm.undo()
        eachitem.lm.undo()
        circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            for secondCircle in circlelist:
                # Unlike the other variants this keys off the LARGER radius.
                thisDiameter=max(firstCircle.radius, secondCircle.radius)/2.0
                if (thisDiameter<=32):
                    #print "first break"
                    break
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    #lm.tracer(False)
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter)
                            newCircle.addCardinals()
                            newCircle.draw()
                            circlelist.append(newCircle)
                            for eachCard in newCircle.cardinals:
                                eachCard.draw()
                                circlelist.append(eachCard)
                    #lm.tracer(True)
            #if (thisDiameter<=1):
                #print "second break"
        # newlist is never populated in this variant (dead code preserved).
        for item in newlist:
            circlelist.append(item)
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    fvh2.savetocircles(a.lm)
def makeart1():
for size in range(7,11):
for numberofsides in range(1,10):
for recursive in (False, True):
print 2**size,numberofsides,recursive
lm=fvh2.fvh.MyTurtle()
ts=lm.getscreen()
ts.screensize(2**(size+2),2**(size+2),'grey50')
ts.setup(2**(size+3),2**(size+3),0,0)
yetanotherdrawingagain(2**size,numberofsides,recursive,lm)
tc=ts.getcanvas()
filename="circles/startSize"+str(size)+"numberofsides"+str(numberofsides)+str(recursive)+'.eps'
ts.update()
tc.postscript(file=filename, height=2**(size+2), width=2**(size+2),x=-2**(size+1),y=-2**(size+1))
ts.bye()
def makeart2():
for size in range(8,11):
for numberofsides in range(6,10):
for recursive in (False, True):
for stepsize in range(2,4):
print stepsize**size,numberofsides,recursive
lm=fvh2.fvh.MyTurtle()
ts=lm.getscreen()
ts.screensize(stepsize**(size+2),stepsize**(size+2),'grey50')
ts.setup(stepsize**(size+3),stepsize**(size+3),0,0)
yetanotherdrawingagainwithmax(stepsize**size,numberofsides,recursive,lm,stepsize)
tc=ts.getcanvas()
filename="circles/max"+str(size)+str(numberofsides)+str(recursive)+'.eps'
tc.postscript(file=filename, height=stepsize**(size+2), width=stepsize**(size+2),x=-stepsize**(size+1),y=-stepsize**(size+1))
ts.bye()
def yetanotherdrawingagainwithcontinue(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
    """Like yetanotherdrawingagain() but skips small pairs instead of breaking.

    The size floor is 4 (not 1) and hitting it executes ``continue`` so the
    rest of the row is still scanned.  Seed circles are drawn, undone twice,
    then expanded with two generations of differentcards() satellites.
    """
    global masterCircleSet
    masterCircleSet=set()
    if not lm:
        lm=fvh2.fvh.MyTurtle()
        lm.setup()
    lm.tracer(False)
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter,lm)
    a.draw()
    a.lm.undo()
    a.lm.undo()
    a.differentcards(numberofoutsidecircles)
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        eachitem.draw()
        eachitem.lm.undo()
        eachitem.lm.undo()
        #eachitem.draw()
        eachitem.differentcards(numberofoutsidecircles)
        for subitem in eachitem.cardinals:
            subitem.draw()
            subitem.lm.undo()
            subitem.lm.undo()
            circlelist.append(subitem)
        circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        #print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            #print "new firstCircle : " + str(firstCircle.checkString)
            #print "Current number of circles in circlelist: "+str(len(circlelist))
            #firstCircle.drawred()
            for secondCircle in circlelist:
                #secondCircle.drawred()
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
                # Skip (not break on) pairs that are already too small.
                if (min(firstCircle.radius, secondCircle.radius)<=4):
                    #print "first break"
                    #secondCircle.draw()
                    continue
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    #print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter,lm)
                            newCircle.draw()
                            newlist.append(newCircle)
                            if recursive:
                                newCircle.differentcards(numberofoutsidecircles)
                                for eachCard in newCircle.cardinals:
                                    eachCard.draw()
                                    circlelist.append(eachCard)
                #secondCircle.draw()
            #if (thisDiameter<=1):
                #print "second break"
            #firstCircle.draw()
        for item in newlist:
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    lm.tracer(True)
    fvh2.savetocircles(a.lm)
def yetanotherdrawingagainwithcontinueandextended(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
    """Continue-style packing seeded with extendedCards() satellites.

    Single satellite generation (unlike the plain 'withcontinue' variant),
    no tracer toggling, and the working circle list is returned to the caller
    in addition to being saved via fvh2.savetocircles().
    """
    global masterCircleSet
    masterCircleSet=set()
    if not lm:
        lm=fvh2.fvh.MyTurtle()
        lm.setup()
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter,lm)
#    a.lm.undo()
#    a.lm.undo()
    a.extendedCards(numberofoutsidecircles)
    circlelist=[]
    circlelist.append(a)
    for eachitem in a.cardinals:
        #eachitem.lm.undo()
        #eachitem.lm.undo()
        #eachitem.differentcards(numberofoutsidecircles)
        #for subitem in eachitem.cardinals:
            #subitem.lm.undo()
            #subitem.lm.undo()
            #circlelist.append(subitem)
        circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            #print "new firstCircle : " + str(firstCircle.checkString)
            #print "Current number of circles in circlelist: "+str(len(circlelist))
            #firstCircle.drawred()
            for secondCircle in circlelist:
                #secondCircle.drawred()
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
                if (min(firstCircle.radius, secondCircle.radius)<=4):
                    #print "first break"
                    #secondCircle.draw()
                    continue
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    #print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter,lm)
                            newlist.append(newCircle)
                            if recursive:
                                newCircle.extendedCards(numberofoutsidecircles)
                                for eachCard in newCircle.cardinals:
                                    circlelist.append(eachCard)
                #secondCircle.draw()
            #if (thisDiameter<=1):
                #print "second break"
            #firstCircle.draw()
        for item in newlist:
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    fvh2.savetocircles(a.lm)
    return circlelist
def yadei(startdiameter,numberofoutsidecircles, recursive=False, lm=None):
    """Continue-style packing seeded with innerextendedCards() satellites.

    Same loop as yetanotherdrawingagainwithcontinueandextended() but the seed
    circle's cardinals are NOT added to the working list (that loop is
    commented out below) -- only circles discovered by checkCircles() grow the
    packing.  Returns the final circle list after saving.
    """
    global masterCircleSet
    masterCircleSet=set()
    if not lm:
        lm=fvh2.fvh.MyTurtle()
        lm.setup()
    smallestsize=startdiameter
    a=Circle(0,0,startdiameter,lm)
#    a.lm.undo()
#    a.lm.undo()
    a.innerextendedCards(numberofoutsidecircles)
    circlelist=[]
    circlelist.append(a)
    #for eachitem in a.cardinals:
        #eachitem.lm.undo()
        #eachitem.lm.undo()
        #eachitem.differentcards(numberofoutsidecircles)
        #for subitem in eachitem.cardinals:
            #subitem.lm.undo()
            #subitem.lm.undo()
            #circlelist.append(subitem)
        #circlelist.append(eachitem)
    totalbefore=len(masterCircleSet)
    totalafter=0
    while ((totalbefore!=totalafter)):
        print "Just started new while loop. number of circles in circlelist: "+str(len(circlelist))
        totalbefore=len(masterCircleSet)
        newlist=[]
        for firstCircle in circlelist:
            #print "new firstCircle : " + str(firstCircle.checkString)
            #print "Current number of circles in circlelist: "+str(len(circlelist))
            #firstCircle.drawred()
            for secondCircle in circlelist:
                #secondCircle.drawred()
                thisDiameter=min(firstCircle.radius, secondCircle.radius)/2.0
                if (min(firstCircle.radius, secondCircle.radius)<=4):
                    #print "first break"
                    #secondCircle.draw()
                    continue
                if thisDiameter<smallestsize:
                    smallestsize=thisDiameter
                    #print "New Smallest Size: "+ str(smallestsize)
                newCircles=checkCircles(firstCircle, secondCircle)
                if newCircles:
                    for x in newCircles:
                        if ((int(x[0])/MINOFFSET*MINOFFSET, int(x[1])/MINOFFSET*MINOFFSET, thisDiameter) not in masterCircleSet):
                            newCircle=Circle(x[0], x[1],thisDiameter,lm)
                            newlist.append(newCircle)
                            if recursive:
                                newCircle.innerextendedCards(numberofoutsidecircles)
                                for eachCard in newCircle.cardinals:
                                    circlelist.append(eachCard)
                #secondCircle.draw()
            #if (thisDiameter<=1):
                #print "second break"
            #firstCircle.draw()
        for item in newlist:
            circlelist.append(item)
        newlist=[]
        totalafter=len(masterCircleSet)
        if (totalafter==totalbefore):
            print "no more moves"
    fvh2.savetocircles(a.lm)
    return circlelist
def itsOct():
    """Empty stub; currently does nothing."""
    pass
| 34.130264 | 257 | 0.662697 | 3,762 | 0.107958 | 0 | 0 | 0 | 0 | 0 | 0 | 5,831 | 0.167331 |
89bed3c6f4358ea5beffd07b449a6c39b7b53e4d | 392 | py | Python | test_calculations.py | joshmgrant/pybay-pytest-the-awesome-parts-code | 4b9fccb8b561e5745c2498fb4360c40a5944694c | [
"Unlicense"
] | null | null | null | test_calculations.py | joshmgrant/pybay-pytest-the-awesome-parts-code | 4b9fccb8b561e5745c2498fb4360c40a5944694c | [
"Unlicense"
] | null | null | null | test_calculations.py | joshmgrant/pybay-pytest-the-awesome-parts-code | 4b9fccb8b561e5745c2498fb4360c40a5944694c | [
"Unlicense"
] | null | null | null | from calculations import TemperatureConverter
def testfreezing_fahrenheit():
converter = TemperatureConverter()
actual = converter.to_celsius(32.0)
expected = 0.0
assert abs(expected - actual) < 0.01
def test_freezing_celsius():
converter = TemperatureConverter()
actual = converter.to_fahrenheit(0)
expected = 32.0
assert abs(expected - actual) < 0.01
| 20.631579 | 45 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
981ec4cd5715c632ca338d47553a10ae304b14da | 12,359 | py | Python | utils.py | xdr940/cc | a98fe9b6c33c332a4c399f968032a90989c55672 | [
"MIT"
] | null | null | null | utils.py | xdr940/cc | a98fe9b6c33c332a4c399f968032a90989c55672 | [
"MIT"
] | 1 | 2019-08-16T07:09:22.000Z | 2019-09-04T04:59:51.000Z | utils.py | xdr940/cc | a98fe9b6c33c332a4c399f968032a90989c55672 | [
"MIT"
] | 1 | 2020-01-13T04:51:22.000Z | 2020-01-13T04:51:22.000Z | from __future__ import division
import shutil
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from path import Path
from collections import OrderedDict
import datetime
epsilon = 1e-8
def high_res_colormap(low_res_cmap, resolution=1000, max_value=1):
    """Upsample a colormap by interpolating each of its RGBA channels.

    Samples ``low_res_cmap`` at its native N entries, linearly interpolates
    every channel onto ``resolution`` points spanning [0, max_value], and
    wraps the dense table in a ListedColormap.  (For LinearSegmentedColormap
    inputs, ``cm.get_cmap(name, lutsize)`` would achieve the same effect.)
    """
    native_positions = np.linspace(0, 1, low_res_cmap.N)
    native_colors = low_res_cmap(native_positions)
    dense_positions = np.linspace(0, max_value, resolution)
    channels = [
        np.interp(dense_positions, native_positions, native_colors[:, c])
        for c in range(native_colors.shape[1])
    ]
    return ListedColormap(np.stack(channels, axis=1))
def opencv_rainbow(resolution=1000):
    """Build the OpenCV-style Rainbow palette as a matplotlib colormap."""
    # (position, (r, g, b)) anchor points; segments are interpolated between them.
    anchors = (
        (0.000, (1.00, 0.00, 0.00)),
        (0.400, (1.00, 1.00, 0.00)),
        (0.600, (0.00, 1.00, 0.00)),
        (0.800, (0.00, 0.00, 1.00)),
        (1.000, (0.60, 0.00, 1.00)),
    )
    return LinearSegmentedColormap.from_list('opencv_rainbow', anchors, resolution)
# Lookup table of colormaps accepted by tensor2array()'s `colormap` argument.
COLORMAPS = {'rainbow': opencv_rainbow(),
             'magma': high_res_colormap(cm.get_cmap('magma')),
             'bone': cm.get_cmap('bone', 10000)}
def tensor2array(tensor, max_value=None, colormap='rainbow',out_shape = 'CHW'):
    """Convert a torch tensor into a displayable float numpy image.

    * 2-D (or 1-channel 3-D) tensors are normalized by ``max_value`` (the
      tensor max when None) and mapped through the named COLORMAPS entry.
    * 3-channel tensors are assumed to lie in [-1, 1] and are shifted to
      [0, 1] via 0.5 + 0.5*x -- TODO confirm the input range with callers.
    * 2-channel tensors (e.g. flow) are passed through unchanged.

    ``out_shape='HWC'`` transposes the channel-first result to channel-last.
    NOTE(review): an all-zero input on the colormap path divides by zero.
    """
    tensor = tensor.detach().cpu()
    if max_value is None:
        max_value = tensor.max().item()
    ndim = tensor.ndimension()
    if ndim == 2 or tensor.size(0) == 1:
        normalized = tensor.squeeze().numpy() / max_value
        rgba = COLORMAPS[colormap](normalized).astype(np.float32)
        array = rgba[:, :, :3].transpose(2, 0, 1)  # drop alpha, go channel-first
    elif ndim == 3:
        channels = tensor.size(0)
        if channels == 3:
            array = 0.5 + tensor.numpy() * 0.5
        elif channels == 2:
            array = tensor.numpy()
    if out_shape == 'HWC':
        array = array.transpose(1, 2, 0)
    return array
def save_checkpoint(save_path, dispnet_state, posenet_state, masknet_state, flownet_state,is_best, filename='checkpoint.pth.tar'):
    """Persist the four sub-network checkpoints under ``save_path``.

    Each non-None state is written to ``<prefix>_<filename>``; when
    ``is_best`` is set, every prefix's checkpoint file is additionally
    copied to ``<prefix>_model_best.pth.tar``.

    Fix: identity comparison ``state is None`` replaces the original
    ``state ==None`` (PEP 8 E711); behavior is unchanged for dict states.
    NOTE(review): with ``is_best`` and a None state, the copy loop still
    expects that prefix's file to exist from an earlier save -- preserved
    as-is since changing it could alter the training-loop contract.
    """
    file_prefixes = ['dispnet', 'posenet', 'masknet', 'flownet']
    states = [dispnet_state, posenet_state, masknet_state, flownet_state]
    for (prefix, state) in zip(file_prefixes, states):
        if state is None:
            # This sub-network was not trained/updated in this run; skip it.
            continue
        torch.save(state, save_path/'{}_{}'.format(prefix,filename))
    if is_best:
        for prefix in file_prefixes:
            shutil.copyfile(save_path/'{}_{}'.format(prefix,filename), save_path/'{}_model_best.pth.tar'.format(prefix))
def save_path_formatter(args, parser):
    """Derive a run-specific save directory from parsed CLI arguments.

    The first path component concatenates the dataset folder name with
    every listed hyper-parameter the user overrode from its argparse
    default; the second component is a month-day-hour:minute timestamp.
    """
    args_dict = vars(args)

    def overridden(key):
        # True when the user supplied a value differing from the parser default.
        return args_dict[key] != parser.get_default(key)

    parts = [str(Path(args_dict['data']).normpath().name)]
    if overridden('epochs'):
        parts.append('{}epochs'.format(args_dict['epochs']))
    # Short prefixes used to encode each overridden hyper-parameter.
    prefixes = OrderedDict([
        ('epoch_size', 'epoch_size'),
        ('sequence_length', 'seq'),
        ('rotation_mode', 'rot_'),
        ('padding_mode', 'padding_'),
        ('batch_size', 'b'),
        ('lr', 'lr'),
    ])
    for key, prefix in prefixes.items():
        if overridden(key):
            parts.append('{}{}'.format(prefix, args_dict[key]))
    run_dir = Path(','.join(parts))
    timestamp = datetime.datetime.now().strftime("%m-%d-%H:%M")
    return run_dir/timestamp
def flow2rgb(flow_map, max_value):
    """Colour-code optical flow: a [2,H,W] tensor (or list of them) -> [3,H,W] RGB.

    Pixels whose two flow components are both exactly zero are marked NaN
    (so they render as background).  Components are scaled by ``max_value``
    when given, otherwise by the largest absolute component, then mapped to
    RGB and clipped to [0, 1].
    """
    def encode(single_map, limit):
        raw = single_map.detach().cpu().numpy()
        # NOTE(review): .numpy() can share storage with a CPU input tensor,
        # so the NaN write below may mutate the caller's data -- preserved.
        _, h, w = raw.shape
        raw[:, (raw[0] == 0) & (raw[1] == 0)] = float('nan')
        if limit is not None:
            scaled = raw / limit
        else:
            scaled = raw / (np.abs(raw).max())
        rgb = np.ones((3, h, w)).astype(np.float32)
        # Vector -> colour encoding: u drives red, v drives blue,
        # green balances against their sum.
        rgb[0] += scaled[0]
        rgb[1] -= 0.5 * (scaled[0] + scaled[1])
        rgb[2] += scaled[1]
        return rgb.clip(0, 1)  # clamp over/underflow into [0, 1]

    if type(flow_map) not in [tuple, list]:
        return encode(flow_map, max_value)
    return [encode(single, max_value) for single in flow_map]
#process_function and process_function using
def logical_or(a, b):
    """Soft element-wise OR via De Morgan: NOT(NOT a AND NOT b).

    Works on scalars and on broadcastable arrays/tensors; for values in
    [0, 1] this is the complement of the product of the complements.
    """
    neither = (1 - a) * (1 - b)
    return 1 - neither
def robust_l1_per_pix(x, q=0.5, eps=1e-2):
    """Per-element robust penalty (x^2 + eps)^q; no reduction.

    With the default q=0.5 this is a smooth approximation of |x|.
    """
    return torch.pow(x.pow(2) + eps, q)
def robust_l1(x, q=0.5, eps=1e-2):
    """Mean robust penalty (x^2 + eps)^q over all elements of x.

    Scalar-reduced counterpart of robust_l1_per_pix().
    """
    per_element = torch.pow(x.pow(2) + eps, q)
    return per_element.mean()
def spatial_normalize(disp):
    """Divide each sample's disparity map by its own overall mean.

    The mean is taken over the channel and both spatial dimensions (dims
    1, 2, 3) with keepdim so it broadcasts back over the [B,C,H,W] input.
    """
    per_sample_mean = disp
    for axis in (1, 2, 3):
        per_sample_mean = per_sample_mean.mean(dim=axis, keepdim=True)
    return disp / per_sample_mean
def compute_all_epes(gt, rigid_pred, non_rigid_pred, rigidity_mask, THRESH=0.5):
    """Score flow predictions split by a rigidity mask.

    The mask is resampled to both the prediction and GT resolutions and
    thresholded at ``THRESH``: the non-rigid prediction is kept where the
    mask is low, the rigid one where it is high, and their sum forms the
    composite flow.  Returns [all_epe, rigid_epe, non_rigid_epe, outliers].
    """
    _, _, pred_h, pred_w = rigid_pred.size()
    _, _, gt_h, gt_w = gt.size()
    mask_at_pred = nn.functional.interpolate(rigidity_mask, size=(pred_h, pred_w), mode='bilinear')
    mask_at_gt = nn.functional.interpolate(rigidity_mask, size=(gt_h, gt_w), mode='bilinear')

    # Keep each prediction only inside its own rigidity region.
    non_rigid_part = (mask_at_pred <= THRESH).type_as(non_rigid_pred).expand_as(non_rigid_pred) * non_rigid_pred
    rigid_part = (mask_at_pred > THRESH).type_as(rigid_pred).expand_as(rigid_pred) * rigid_pred
    combined = non_rigid_part + rigid_part

    # Split the ground truth the same way, at GT resolution.
    gt_non_rigid = (mask_at_gt <= THRESH).type_as(gt).expand_as(gt) * gt
    gt_rigid = (mask_at_gt > THRESH).type_as(gt).expand_as(gt) * gt

    return [compute_epe(gt, combined),
            compute_epe(gt_rigid, rigid_part),
            compute_epe(gt_non_rigid, non_rigid_part),
            outlier_err(gt, combined)]
def flow_diff(gt, pred):
    """Per-pixel endpoint-error map between GT flow and a prediction.

    The prediction is bilinearly resized to the GT resolution and its u/v
    components are rescaled by the corresponding size ratios before the
    Euclidean difference is taken.  Returns a [B,H,W] tensor.
    """
    _, _, pred_h, pred_w = pred.size()
    bs, nc, gt_h, gt_w = gt.size()
    u_gt, v_gt = gt[:, 0, :, :], gt[:, 1, :, :]
    resized = nn.functional.interpolate(pred, size=(gt_h, gt_w), mode='bilinear')
    # Flow vectors scale with the spatial resize factor.
    u_pred = resized[:, 0, :, :] * (gt_w / pred_w)
    v_pred = resized[:, 1, :, :] * (gt_h / pred_h)
    return torch.sqrt(torch.pow(u_gt - u_pred, 2) + torch.pow(v_gt - v_pred, 2))
def weighted_binary_cross_entropy(output, target, weights=None):
    """Binary cross-entropy with optional per-class weighting.

    ``weights``, when given, is a 2-sequence: weights[0] scales the
    negative (target == 0) term, weights[1] the positive term.  The module
    constant ``epsilon`` guards the logs against outputs of exactly 0 or 1.
    Called by the consensus depth/flow mask loss.
    """
    positive_term = target * torch.log(output + epsilon)
    negative_term = (1 - target) * torch.log(1 - output + epsilon)
    if weights is None:
        log_likelihood = positive_term + negative_term
    else:
        assert len(weights) == 2
        log_likelihood = weights[1] * positive_term + weights[0] * negative_term
    # Negate the mean log-likelihood to obtain the loss.
    return torch.neg(torch.mean(log_likelihood))
def outlier_err(gt, pred, tau=[3,0.05]):
    """Flow outlier ratio over valid pixels (KITTI-style Fl criterion).

    ``gt`` channels are (u, v, valid).  A valid pixel counts as an outlier
    when its endpoint error exceeds both tau[0] pixels and tau[1] times the
    GT flow magnitude.  Returns the outlier fraction as a Python float.
    """
    _, _, pred_h, pred_w = pred.size()
    bs, nc, gt_h, gt_w = gt.size()
    u_gt, v_gt, valid_gt = gt[:, 0, :, :], gt[:, 1, :, :], gt[:, 2, :, :]
    resized = nn.functional.interpolate(pred, size=(gt_h, gt_w), mode='bilinear')
    u_pred = resized[:, 0, :, :] * (gt_w / pred_w)
    v_pred = resized[:, 1, :, :] * (gt_h / pred_h)

    epe = torch.sqrt(torch.pow(u_gt - u_pred, 2) + torch.pow(v_gt - v_pred, 2))
    epe = epe * valid_gt
    gt_magnitude = torch.sqrt(torch.pow(u_gt, 2) + torch.pow(v_gt, 2))

    exceeds_abs = (epe > tau[0]).type_as(epe)
    exceeds_rel = ((epe / (gt_magnitude + epsilon)) > tau[1]).type_as(epe)
    outliers = exceeds_abs * exceeds_rel * valid_gt

    ratio = outliers.sum() / (valid_gt.sum() + epsilon)
    if type(ratio) == Variable:
        ratio = ratio.data  # legacy guard for pre-0.4 PyTorch Variables
    return ratio.item()
def compute_epe(gt, pred):
    """Average endpoint error between GT flow and a (smaller) prediction.

    The prediction is bilinearly resized to the GT resolution with its u/v
    components rescaled accordingly.  When ``gt`` carries a third channel
    it is treated as a validity mask and the mean runs over valid pixels;
    otherwise the mean runs over every pixel.  Returns a Python float.
    """
    _, _, pred_h, pred_w = pred.size()
    bs, nc, gt_h, gt_w = gt.size()
    u_gt, v_gt = gt[:, 0, :, :], gt[:, 1, :, :]
    resized = nn.functional.interpolate(pred, size=(gt_h, gt_w), mode='bilinear')
    u_pred = resized[:, 0, :, :] * (gt_w / pred_w)
    v_pred = resized[:, 1, :, :] * (gt_h / pred_h)

    epe = torch.sqrt(torch.pow(u_gt - u_pred, 2) + torch.pow(v_gt - v_pred, 2))
    if nc == 3:
        valid = gt[:, 2, :, :]
        epe = epe * valid
        avg_epe = epe.sum() / (valid.sum() + epsilon)
    else:
        avg_epe = epe.sum() / (bs * gt_h * gt_w)
    if type(avg_epe) == Variable:
        avg_epe = avg_epe.data  # legacy guard for pre-0.4 PyTorch Variables
    return avg_epe.item()
def compute_errors(gt, pred, crop=True):
    """Standard monocular-depth metrics, averaged over the batch.

    Per sample: keep pixels with 0 < gt < 80 (optionally restricted to the
    Garg/Eigen evaluation crop), clamp and median-scale the prediction to
    the GT, then accumulate abs-diff, abs-rel, sq-rel and the three
    delta < 1.25^k accuracies.  Returns the six batch-averaged metrics.
    """
    abs_diff, abs_rel, sq_rel, a1, a2, a3 = 0, 0, 0, 0, 0, 0
    batch_size = gt.size(0)
    if crop:
        # Crop used by Garg ECCV16 to reproduce Eigen NIPS14: start from an
        # all-False mask and switch on the central evaluation window.
        crop_mask = gt[0] != gt[0]
        y1, y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
        x1, x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
        crop_mask[y1:y2, x1:x2] = 1
    for gt_map, pred_map in zip(gt, pred):
        valid = (gt_map > 0) & (gt_map < 80)
        if crop:
            valid = valid & crop_mask
        gt_vals = gt_map[valid]
        pred_vals = pred_map[valid].clamp(1e-3, 80)
        # Median scaling aligns the scale-ambiguous prediction with GT.
        pred_vals = pred_vals * torch.median(gt_vals) / torch.median(pred_vals)

        ratio = torch.max(gt_vals / pred_vals, pred_vals / gt_vals)
        a1 += (ratio < 1.25).float().mean()
        a2 += (ratio < 1.25 ** 2).float().mean()
        a3 += (ratio < 1.25 ** 3).float().mean()

        abs_diff += torch.mean(torch.abs(gt_vals - pred_vals))
        abs_rel += torch.mean(torch.abs(gt_vals - pred_vals) / gt_vals)
        sq_rel += torch.mean((gt_vals - pred_vals) ** 2 / gt_vals)
    return [metric / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3]]
def compute_errors2(gt, pred, crop=True):
    """Depth metrics variant: abs-rel/sq-rel in percent plus a 7th term.

    Same masking, optional Garg/Eigen crop and median scaling as
    compute_errors(); additionally appends a mean-squared-difference term.
    NOTE(review): that 7th term is named ``epe`` but is an MSE, not an
    endpoint error -- preserved as-is.
    """
    abs_diff, abs_rel, sq_rel, a1, a2, a3, epe = 0, 0, 0, 0, 0, 0, 0
    batch_size = gt.size(0)
    if crop:
        # Garg ECCV16 crop (reproduces the Eigen NIPS14 evaluation window).
        crop_mask = gt[0] != gt[0]
        y1, y2 = int(0.40810811 * gt.size(1)), int(0.99189189 * gt.size(1))
        x1, x2 = int(0.03594771 * gt.size(2)), int(0.96405229 * gt.size(2))
        crop_mask[y1:y2, x1:x2] = 1
    for gt_map, pred_map in zip(gt, pred):
        valid = (gt_map > 0) & (gt_map < 80)
        if crop:
            valid = valid & crop_mask
        gt_vals = gt_map[valid]
        pred_vals = pred_map[valid].clamp(1e-3, 80)
        pred_vals = pred_vals * torch.median(gt_vals) / torch.median(pred_vals)

        ratio = torch.max(gt_vals / pred_vals, pred_vals / gt_vals)
        a1 += (ratio < 1.25).float().mean()
        a2 += (ratio < 1.25 ** 2).float().mean()
        a3 += (ratio < 1.25 ** 3).float().mean()

        abs_diff += torch.mean(torch.abs(gt_vals - pred_vals))
        abs_rel += torch.mean(torch.abs(gt_vals - pred_vals) / gt_vals) * 100
        sq_rel += torch.mean((gt_vals - pred_vals) ** 2 / gt_vals) * 100
        epe += torch.mean((gt_vals - pred_vals) ** 2)
    return [metric / batch_size for metric in [abs_diff, abs_rel, sq_rel, a1, a2, a3, epe]]
def VGSmap(map):
    """Scale a [0, 1] map up to [0, 255].

    NOTE(review): looks unfinished.  ``map`` shadows the builtin and nothing
    is returned; ``map *= 255`` only has a visible effect for mutable
    array-like inputs (e.g. numpy arrays mutated in place) -- for immutable
    inputs this function is a no-op.  Confirm intended contract before use.
    """
    map*=255
    pass
| 35.719653 | 130 | 0.631524 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,062 | 0.165796 |
981f4131a08264f22a6b10de9d991012715fd4c7 | 966 | py | Python | oops_fhir/r4/value_set/provenance_activity_type.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/provenance_activity_type.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | oops_fhir/r4/value_set/provenance_activity_type.py | Mikuana/oops_fhir | 77963315d123756b7d21ae881f433778096a1d25 | [
"MIT"
] | null | null | null | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_participation_type import v3ParticipationType
# Public API of this generated module.
__all__ = ["ProvenanceActivityType"]

# FHIR ValueSet definition loaded from the JSON file shipped next to this module.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class ProvenanceActivityType(ValueSet):
    """
    Provenance activity type

    This value set contains representative Activity Type codes, which
    includes codes from the HL7 DocumentCompletion, ActStatus, and
    DataOperations code system, W3C PROV-DM and PROV-N concepts and display
    names, several HL7 Lifecycle Event codes for which there are agreed upon
    definitions, and non-duplicated codes from the HL7 Security and Privacy
    Ontology Operations codes.

    Status: draft - Version: 4.0.1

    http://hl7.org/fhir/ValueSet/provenance-activity-type
    """

    # TODO: generator template emits this placeholder body; the class only
    # carries its docstring plus the Meta.resource binding below.
    pass

    class Meta:
        # Parsed ValueSet resource backing this class (loaded at import time).
        resource = _resource
981f6d8c4555ef92f86c7b62daf2e7f20eb0fb71 | 3,312 | py | Python | py/adafruit-circuitpython-bundle-7.x-mpy-20211104/examples/is31fl3731_rgbmatrix5x5_rainbow.py | ParentZap/micro-projects | b260ac09057935f44d3d00ff96670fabfd567aac | [
"Apache-2.0"
] | null | null | null | py/adafruit-circuitpython-bundle-7.x-mpy-20211104/examples/is31fl3731_rgbmatrix5x5_rainbow.py | ParentZap/micro-projects | b260ac09057935f44d3d00ff96670fabfd567aac | [
"Apache-2.0"
] | null | null | null | py/adafruit-circuitpython-bundle-7.x-mpy-20211104/examples/is31fl3731_rgbmatrix5x5_rainbow.py | ParentZap/micro-projects | b260ac09057935f44d3d00ff96670fabfd567aac | [
"Apache-2.0"
] | null | null | null | # SPDX-FileCopyrightText: 2021 Sandy Macdonald, David Glaude, James Carr
# SPDX-License-Identifier: MIT
"""
Example to display a rainbow animation on the 5x5 RGB Matrix Breakout.
Usage:
Rename this file code.py and pop it on your Raspberry Pico's
CIRCUITPY drive.
This example is for use on the Pico Explorer Base or other board that use the same SDA/SCL pin.
Author(s): Sandy Macdonald, David Glaude, James Carr
"""
import time
import math
import busio
import board
from adafruit_is31fl3731.rgbmatrix5x5 import RGBmatrix5x5 as Display
def hsv_to_rgb(hue, sat, val):
    """
    Convert an HSV colour to its (r, g, b) representation.
    :param hue: hue; 0.0-1.0
    :param sat: saturation; 0.0-1.0
    :param val: value; 0.0-1.0
    """
    # Zero saturation is pure grey: every channel equals the value.
    if sat == 0.0:
        return val, val, val
    sector = int(hue * 6.0)
    fractional = (hue * 6.0) - sector
    low = val * (1.0 - sat)
    falling = val * (1.0 - sat * fractional)
    rising = val * (1.0 - sat * (1.0 - fractional))
    sector %= 6
    # One (r, g, b) arrangement per 60-degree sector of the colour wheel.
    arrangements = (
        (val, rising, low),
        (falling, val, low),
        (low, val, rising),
        (low, falling, val),
        (rising, low, val),
        (val, low, falling),
    )
    return arrangements[sector]
# Create the I2C bus on a Pico Explorer Base
# (busio.I2C takes the clock pin first, then the data pin)
i2c = busio.I2C(board.GP5, board.GP4)
# Set up 5x5 RGB matrix Breakout
display = Display(i2c)
def test_pixels(r, g, b):
    """Light one pixel at a time, scanning left-to-right, top-to-bottom."""
    for index in range(25):
        col, row = index % 5, index // 5
        display.fill(0)  # blank everything except the current pixel
        display.pixelrgb(col, row, r, g, b)
        time.sleep(0.05)
def test_rows(r, g, b):
    """Light complete rows one at a time, top row to bottom row."""
    for row in range(5):
        display.fill(0)  # clear before drawing the next row
        for col in range(5):
            display.pixelrgb(col, row, r, g, b)
        time.sleep(0.2)
def test_columns(r, g, b):
    """Light complete columns one at a time, leftmost to rightmost."""
    for col in range(5):
        display.fill(0)  # clear before drawing the next column
        for row in range(5):
            display.pixelrgb(col, row, r, g, b)
        time.sleep(0.2)
def test_rainbow_sweep():
    """Sweep a diagonal rainbow across the 5x5 matrix for 100 animation steps."""
    step = 0
    for _ in range(100):
        for y in range(0, 5):
            for x in range(0, 5):
                # Hue advances along the x+y diagonal and with time (step).
                pixel_hue = (x + y + (step / 20)) / 8
                # Keep only the fractional part so the hue wraps into 0..1.
                # The value is always >= 0 here, so a single int() truncation
                # suffices; the original also added 0 and re-floored with
                # math.floor() -- both no-ops, removed.
                pixel_hue -= int(pixel_hue)
                rgb = hsv_to_rgb(pixel_hue, 1, 1)
                display.pixelrgb(
                    x, y, int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)
                )
        time.sleep(0.01)
        step += 3
# Demo loop: cycle every test pattern in red, green, blue, white forever.
while True:
    test_pixels(64, 0, 0) # RED
    test_pixels(0, 64, 0) # GREEN
    test_pixels(0, 0, 64) # BLUE
    test_pixels(64, 64, 64) # WHITE
    test_rows(64, 0, 0) # RED
    test_rows(0, 64, 0) # GREEN
    test_rows(0, 0, 64) # BLUE
    test_rows(64, 64, 64) # WHITE
    test_columns(64, 0, 0) # RED
    test_columns(0, 64, 0) # GREEN
    test_columns(0, 0, 64) # BLUE
    test_columns(64, 64, 64) # WHITE
    test_rainbow_sweep()
| 24.533333 | 96 | 0.536534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,008 | 0.304348 |
98241ad4baa33294d1ccb68f952b1b1217195b7e | 10,405 | py | Python | src/GG_ESB.py | CB1204/LapSimulation | 7d7f7c43a6bc3db3dbf02050d939da3f17647c2c | [
"MIT"
] | 7 | 2018-02-22T16:58:26.000Z | 2022-02-05T18:17:56.000Z | src/GG_ESB.py | CB1204/LapSimulation | 7d7f7c43a6bc3db3dbf02050d939da3f17647c2c | [
"MIT"
] | null | null | null | src/GG_ESB.py | CB1204/LapSimulation | 7d7f7c43a6bc3db3dbf02050d939da3f17647c2c | [
"MIT"
] | 2 | 2019-04-15T21:07:03.000Z | 2021-05-11T07:41:49.000Z | from TwoDimLookup_motor import TwoDimLookup_motor
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from scipy import interpolate
import numpy as np
### Input Parameters
# C_F = 27000 # Cornering stiffness front / Schräglaufsteifigkeit vorne [N/rad] - Is already for two wheels !
# C_R = 35000 # Cornering stiffness rear / Schräglaufsteifigkeit hinten [N/rad] - Is already for two wheels !
# m = 350 # Mass vehicle + driver [kg]
# CoG_X = 0.65 # Actual position of CG (0 at front axis, 1 at rear axis)
#
# mu = 1.4 # Friction coefficient
# alpha = 7.5 # Slip angle [deg]
# DriveType = 2WD or 4WD
# res = resolution = number of data points
class GG_ESB_OneDim:
    """Single-speed g-g (lateral vs. longitudinal acceleration) envelope model."""

    def __init__(self, C_F, C_R, m, CoG_X, mu, alpha, DriveType):
        # Cornering stiffnesses [N/rad], already per axle (front / rear).
        self.C_F = C_F
        self.C_R = C_R
        self.m = m                  # vehicle + driver mass [kg]
        self.CoG_X = CoG_X          # CG position (0 = front axle, 1 = rear axle)
        self.mu = mu                # tyre friction coefficient
        self.alpha = alpha          # slip angle [deg]
        self.DriveType = DriveType  # '2WD' or '4WD'
        # General constants
        self.deg2rad = np.pi / 180
        self.g = 9.81

    def GG_ESB_ay_Max(self):
        """Return the peak lateral acceleration [m/s^2] from axle side forces."""
        slip_rad = self.alpha * self.deg2rad
        total_side_force = slip_rad * self.C_F + slip_rad * self.C_R
        return total_side_force / self.m

    def GG_ESB_ax_Max(self):
        """Return the peak longitudinal acceleration [m/s^2] from driven-axle grip."""
        front_load = (1 - self.CoG_X) * self.m * self.g
        rear_load = self.CoG_X * self.m * self.g
        if self.DriveType == '2WD':
            # Rear-wheel drive: only the rear axle can transmit tractive force.
            tractive_force = self.mu * rear_load
        elif self.DriveType == '4WD':
            tractive_force = self.mu * front_load + self.mu * rear_load
        return tractive_force / self.m

    def Plot_gg(self, ay_max, ax_max):
        """Compute the g-g envelope in g units (plotting is currently disabled)."""
        resolution = 1000
        ay = np.linspace((-1) * ay_max, ay_max, resolution)
        ax_upper = ((1 - (ay ** 2 / ay_max ** 2)) * ax_max ** 2) ** 0.5
        if self.DriveType == '2WD':
            ax_lower = (-2) * ax_upper
        if self.DriveType == '4WD':
            ax_lower = (-1) * ax_upper
        ay = ay / self.g
        ax_U_inG = np.asarray(ax_upper) / self.g
        ax_L_inG = np.asarray(ax_lower) / self.g
########################################################
class GG_ESB_TwoDim:
    """Speed-dependent g-g-V model: grip envelope including aero downforce and motor limit."""

    def __init__(self, C_F, C_R, m, CoG_X, mu, alpha, CoP_X, C_la, rho, DriveType, gearRatio, tireRadius, fr, Lift2Drag ):
        self.C_F = C_F          # front cornering stiffness [N/rad], per axle
        self.C_R = C_R          # rear cornering stiffness [N/rad], per axle
        self.m = m              # vehicle + driver mass [kg]
        self.CoG_X = CoG_X      # CG position (0 = front axle, 1 = rear axle)
        self.mu = mu            # tyre friction coefficient
        self.alpha = alpha      # slip angle [deg]
        self.CoP_X = CoP_X # = 0.61
        self.C_la = C_la # = 3.52 m^2
        self.rho = rho          # air density
        self.DriveType = DriveType #2WD or 4WD
        self.gearRatio = gearRatio
        self.tireRadius = tireRadius
        self.fr = fr            # rolling-resistance coefficient (passed to motor model)
        self.Lift2Drag = Lift2Drag
        # General constants
        self.deg2rad = np.pi/180
        self.g = 9.81

    def GGV_Map(self):
        """Build the g-g-V surface.

        Returns four parallel lists (one entry per speed sample, each a list
        over lateral-acceleration samples): upper ax limit (clamped by the
        motor), lower (braking) ax limit, ay samples, and the speed value.
        """
        ax_upper_values = []
        ax_lower_values = []
        ay_values = []
        speed_values =[]
        Speed_resolution = 200
        ay_resolution = 500
        # initialize speed
        VehicleSpeed = np.linspace(0.001, 50, Speed_resolution) #Speed in m/s
        #loop through different velocities
        for i in range(len(VehicleSpeed)):
            #find maximum lateral acceleration
            ay_max = self.GG_ESB_ay_Max()
            ay = np.linspace((-1)*ay_max, ay_max, ay_resolution)
            ax_max_upper = self.GG_ESB_ax_Max_upper(VehicleSpeed[i])
            ax_up = []
            # Friction-ellipse combination of lateral and longitudinal grip.
            for j in range(len(ay)):
                ax_up.append(((1-(ay[j]**2 / ay_max**2)) * ax_max_upper**2)**0.5)
            ax_max_lower = self.GG_ESB_ax_Max_lower(VehicleSpeed[i])
            ax_low = []
            for j in range(len(ay)):
                ax_low.append(-1*((1-(ay[j]**2 / ay_max**2)) * ax_max_lower**2)**0.5)
            speed = []
            for j in range(len(ay)):
                speed.append(VehicleSpeed[i])
            ax_upper_values.append(ax_up)
            ax_lower_values.append(ax_low)
            ay_values.append(ay)
            speed_values.append(speed)
        # Load Motor map
        motor = TwoDimLookup_motor(self.gearRatio, self.tireRadius, self.CoG_X, self.m, self.CoP_X, self.C_la, self.rho, self.fr, self.Lift2Drag, self.DriveType)
        ax_motor = motor.ax_motor(speed_values)
        # Replace ax values which are higher than ax_motor
        # (the tyres may allow more acceleration than the motor can deliver)
        for i in range(len(ax_upper_values)):
            for j in range(len (ax_upper_values[i])):
                if ax_upper_values[i][j] > ax_motor[i]:
                    ax_upper_values[i][j] = ax_motor[i]
        return ax_upper_values, ax_lower_values, ay_values, speed_values

    def GG_ESB_ay_Max(self):
        """Peak lateral acceleration [m/s^2] from front + rear side forces."""
        # calculate ay max
        FY_F = self.alpha * self.deg2rad * self.C_F
        FY_R = self.alpha * self.deg2rad * self.C_R
        FY_ovr = FY_F + FY_R
        ay_max = FY_ovr / self.m
        return ay_max

    def GG_ESB_ax_Max_upper(self, VehicleSpeed):
        """Peak tractive (accelerating) ax [m/s^2] at a speed, incl. downforce."""
        # calculate ax max
        # Mass
        FZ_m_F = (1-self.CoG_X) * self.m * self.g
        FZ_m_R = self.CoG_X * self.m * self.g
        #Downforce
        FZ_d_F = (1-self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        FZ_d_R = (self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        # FZ ovr
        FZ_ovr_F = FZ_m_F + FZ_d_F
        FZ_ovr_R = FZ_m_R + FZ_d_R
        #FX
        FX_F = self.mu * FZ_ovr_F
        FX_R = self.mu * FZ_ovr_R
        # Only the driven axle(s) contribute when accelerating.
        if self.DriveType == '2WD':
            FX_ovr = FX_R
        elif self.DriveType == '4WD':
            FX_ovr = FX_F + FX_R
        ax_max_upper = FX_ovr / self.m
        return ax_max_upper

    def GG_ESB_ax_Max_lower(self, VehicleSpeed):
        """Peak braking ax magnitude [m/s^2] at a speed (all four wheels brake)."""
        # calculate ax max
        # Mass
        FZ_m_F = (1-self.CoG_X) * self.m * self.g
        FZ_m_R = self.CoG_X * self.m * self.g
        #Downforce
        FZ_d_F = (1-self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        FZ_d_R = (self.CoP_X) * self.C_la * 0.5*self.rho * VehicleSpeed**2
        # FZ ovr
        FZ_ovr_F = FZ_m_F + FZ_d_F
        FZ_ovr_R = FZ_m_R + FZ_d_R
        #FX
        FX_F = self.mu * FZ_ovr_F
        FX_R = self.mu * FZ_ovr_R
        #FX ovr
        FX_ovr = FX_F + FX_R
        ax_max_lower = FX_ovr / self.m
        return ax_max_lower

    def Plot_ggV(self, ax_upper_values, ax_lower_values, ay_values, speed_values ):
        """Render the g-g-V envelope as two 3D surfaces (upper in red)."""
        fig = plt.figure()
        ax = fig.gca(projection='3d')
        surf_upper = ax.plot_surface(ay_values, speed_values, ax_upper_values, color = 'r')
        surf_lower = ax.plot_surface(ay_values, speed_values, ax_lower_values)
        ax.set_xlabel('ay [m/s^2]')
        ax.set_ylabel('speed [m/s]')
        ax.set_zlabel('ax [m/s^2]')
        plt.show()

    def Plot_Long_Speed_Distance(self):
        """Integrate straight-line acceleration over time using the motor map.

        Uses simple forward-Euler integration with a 5 ms step over 6 s,
        then locates the 0-100 kph time and the 75 m acceleration time.
        Plotting of the results is currently commented out.
        """
        # Calculate motor
        VehicleSpeed = np.array([np.linspace(0.0001, 60, 200), np.zeros(200)])
        VehicleSpeed = np.transpose(VehicleSpeed).tolist()
        Motor = TwoDimLookup_motor(self.gearRatio, self.tireRadius, self.CoG_X, self.m, self.CoP_X, self.C_la, self.rho, self.fr, self.Lift2Drag, self.DriveType)
        ax_Motor = Motor.ax_motor(VehicleSpeed)
        speed = np.linspace(0.0001, 60, 200)
        a = interpolate.interp1d(speed, ax_Motor, bounds_error=False, fill_value=0)
        #Calculate speed and distance
        v = [0.0001]
        s = [0]
        t = [0]
        delta_t = 0.005 # sec
        t_end = 6
        while t[-1] <= t_end:
            a_v = a(v[-1])
            v_t = a_v * delta_t + v[-1]
            s_t = 0.5 * a_v * delta_t**2 + v[-1] * delta_t + s[-1]
            v.append(v_t)
            s.append(s_t)
            t.append(t[-1] + delta_t)
        # Find important vehicle parameters
        # From 1 to 100 kph
        index1 = np.nonzero(np.asarray(v) <= 100/3.6)
        index_0to100 = np.max(index1)
        # Time for 75 m acceleration
        index2 = np.nonzero(np.asarray(s) <= 75)
        #print(index2)
        index_75m = np.max(index2)
        # Graphs
        # f, axarr = plt.subplots(2, sharex=True)
        # axarr[0].plot(t, v,'b', label = 'Vehicle speed', linewidth = 1.5)
        # axarr[0].plot(t[index_0to100], v[index_0to100], marker = '+', markersize = 15, markeredgewidth = 2.5, markerfacecolor = 'r', markeredgecolor = 'r', linestyle = 'None', label = 'Time from 0 to 100 kph: ' + str(np.round(t[index_0to100],2)) + ' s')
        # axarr[0].set_title('Vehicle Speed vs. Time')
        # axarr[0].set_xlabel('Time [s]')
        # axarr[0].set_ylabel('Vehicle speed [m/s]')
        # axarr[0].grid(True)
        # axarr[0].legend(numpoints=1, shadow=True, fancybox=True)
        #
        # axarr[1].plot(t, s, 'g', label = 'Driven Distance', linewidth = 1.5)
        # axarr[1].plot(t[index_75m], s[index_75m], marker = '+', markersize = 15, markeredgewidth = 2.5, markerfacecolor = 'r', markeredgecolor = 'r', linestyle = 'None', label = 'Time for 75 meters acceleration: ' + str(np.round(t[index_75m],2)) + ' s')
        # axarr[1].set_title('Driven Distance vs. Time')
        # axarr[1].set_xlabel('Time [s]')
        # axarr[1].set_ylabel('Driven Distance [m]')
        # axarr[1].grid(True)
        # axarr[1].legend(numpoints=1, shadow=True, fancybox=True)
        #
        # plt.show()
| 33.242812 | 257 | 0.528304 | 7,898 | 0.758766 | 0 | 0 | 0 | 0 | 0 | 0 | 2,917 | 0.280238 |
98243878adcb6218f1936ee177c9f2cd4bab733f | 81 | py | Python | models/env.py | claranet/cloud-deploy | a1277f5a1173efffbaeb298c9d22ec0aa39c62e7 | [
"Apache-2.0"
] | 25 | 2018-03-27T13:26:17.000Z | 2022-02-02T09:24:25.000Z | models/env.py | claranet/cloud-deploy | a1277f5a1173efffbaeb298c9d22ec0aa39c62e7 | [
"Apache-2.0"
] | null | null | null | models/env.py | claranet/cloud-deploy | a1277f5a1173efffbaeb298c9d22ec0aa39c62e7 | [
"Apache-2.0"
] | 5 | 2018-05-08T16:09:57.000Z | 2021-08-04T13:12:36.000Z | env = ['prod', 'preprod', 'dev', 'staging', 'test', 'demo', 'int', 'uat', 'oat']
| 40.5 | 80 | 0.506173 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 56 | 0.691358 |
982572d26aa32e7a439ebea52534db824d172dbe | 5,936 | py | Python | magic-wand/accelerometers/adxl345/adxl345_const.py | uraich/EdgeComputing | 2819fa4e37cd2d81e806813aa0f713eaa846baec | [
"MIT"
] | null | null | null | magic-wand/accelerometers/adxl345/adxl345_const.py | uraich/EdgeComputing | 2819fa4e37cd2d81e806813aa0f713eaa846baec | [
"MIT"
] | null | null | null | magic-wand/accelerometers/adxl345/adxl345_const.py | uraich/EdgeComputing | 2819fa4e37cd2d81e806813aa0f713eaa846baec | [
"MIT"
] | null | null | null | from micropython import const
ADXL345_ADDRESS = const(0x53) # I2C address of adxl345
ADXL345_DEVICE_ID = const(0xe5) #
ADXL345_DEVID = const(0x00) # Device ID
ADXL345_THESH_TAP = const(0x1d) # Tap threshold
ADXL345_OFSX = const(0x1e) # X-axis offset
ADXL345_OFSY = const(0x1f) # y-axis offset
ADXL345_OFSZ = const(0x20) # z-axis offset
ADXL345_DUR = const(0x21) # Tap duration
ADXL345_LATENT = const(0x22) # Tap latency
ADXL345_WINDOW = const(0x23) # Tap window
ADXL345_THRESH_ACT = const(0x24) # Activity threshold
ADXL345_THRESH_INACT = const(0x25) # Inactivity threshold
ADXL345_TIME_INACT = const(0x26) # Inactivity time
ADXL345_ACT_INACT_CTL = const(0x27) # Axis enable for activity and inactivity detection
ADXL345_THRESH_FF = const(0x28) # Free-fall threshold
ADXL345_TIME_FF = const(0x29) # Free-fall time
ADXL345_TAP_AXIS = const(0x2a) # Axis control for single/touble tap
ADXL345_ACT_TAP_STATUS = const(0x2b) # Source of single/touble tap
ADXL345_BW_RATE = const(0x2c) # Data rate and power mode control
ADXL345_POWER_CTL = const(0x2d) # Power-saving features control
ADXL345_INT_ENABLE = const(0x2e) # Interrupt enable control
ADXL345_INT_MAP = const(0x2f) # Interrupt mapping control
ADXL345_INT_SOURCE = const(0x30) # Source of Interrupts
ADXL345_DATA_FORMAT = const(0x31) # Data format control
ADXL345_DATAAX0 = const(0x32) # X-Axis Data 0
ADXL345_DATAAX1 = const(0x33) # X-Axis Data 1
ADXL345_DATAAY0 = const(0x34) # Y-Axis Data 0
ADXL345_DATAAY1 = const(0x35) # Y-Axis Data 1
ADXL345_DATAAZ0 = const(0x36) # Z-Axis Data 0
ADXL345_DATAAZ1 = const(0x37) # Z-Axis Data 1
ADXL345_FIFO_CTL = const(0x38) # FIFO control
ADXL345_FIFO_STATUS = const(0x39) # FIFO status
# Bit definitions
# ACT_INACT_CTL
INACT_Z_EN = const(0)
INACT_Y_EN = const(1)
INACT_X_EN = const(2)
INACT_AC_DC = const(3)
ACT_Z_EN = const(4)
ACT_Y_EN = const(5)
ACT_X_EN = const(6)
ACT_AC_DC = const(7)
# TAP AXES
TAP_X_EN = const(0)
TAP_Y_EN = const(1)
TAP_Z_EN = const(2)
SUPPRESS = const(3)
# ACT_TAP_STATUS
TAP_Z_SOURCE = const(0)
TAP_Y_SOURCE = const(1)
TAP_X_SOURCE = const(2)
ASLEEP = const(3)
ACT_Z_SOURCE = const(4)
ACT_Y_SOURCE = const(5)
ACT_X_SOURCE = const(6)
# BW_RATE
RATE = const(3)
RATE_SIZE = const(4)
LOW_POWER = const(4)
LOW_POWER_SIZE = const(2)
# POWER_CTL
WAKEUP = const(1)
WAKEUP_SIZE = const(2)
SLEEP = const(2)
MEASURE = const(3)
AUTO_SLEEP = const(5)
AUTO_SLEEP_SIZE = const(2)
LINK = const(5)
# INT_ENABLE
OVERRUN = const(0)
WATERMARK = const(1)
FREE_FALL = const(2)
INACTIVITY = const(3)
ACTIVITY = const(4)
DOUBLE_TAP = const(5)
SINGLE_TAP = const(6)
DATA_READY = const(7)
# DATA_FORMAT
RANGE = const(1)
RANGE_SIZE = const(2)
JUSTIFY = const(2)
FULL_RES = const(3)
FULL_RES_SIZE = const(2)
INT_INVERT = const(5)
SPI = const(6)
SELF_TEST = const(7)
# FIFO_CTL
SAMPLES = const(4)
SAMPLES_SIZE = const(5)
TRIGGER = const(5)
FIFO_TYPE = const(7)
FIFO_TYPE_SIZE = const(2)
MODE_BYPASS = const(0)
MODE_FIFO = const(1)
MODE_STREAM = const(2)
MODE_TRIGGER = const(3)
# FIFO_STATUS
FIFO_ENTRIES = const(5)
FIFO_ENTRIES_SIZE = const(6)
FIFO_TRIG = const(7)
# Data rates
RATE_3200 = const(0xf)
RATE_1600 = const(0xe)
RATE_800 = const(0xd)
RATE_400 = const(0xc)
RATE_200 = const(0xb)
RATE_100 = const(0xa)
RATE_50 = const(0x9)
RATE_25 = const(0x8)
RATE_12_5 = const(0x7)
RATE_6_25 = const(0x6)
RATE_3_13 = const(0x5)
RATE_0_78 = const(0x4)
RATE_0_39 = const(0x3)
RATE_0_20 = const(0x1)
RATE_0_10 = const(0x0)
# Acceleration full range
ACCEL_2G = const(0)
ACCEL_4G = const(1)
ACCEL_8G = const(2)
ACCEL_16G = const(3)
# FIFO
BYPASS = const(0)
FIFO_MODE = const(1)
STREAM_MODE = const(2)
TRIGGER_MODE = const(3)
# POWER_CTL Wakeup frequency
WAKE_UP_8_HZ = const(0)
WAKE_UP_4_HZ = const(1)
WAKE_UP_2_HZ = const(2)
WAKE_UP_1_HZ = const(3)
| 39.573333 | 98 | 0.473214 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 824 | 0.138814 |
9825a6859e917fea31bf98fd074b4e0de54be3a9 | 3,885 | py | Python | genes.py | AgamChopra/simulation-in-a-box | 2a346a2fc83d79e542b64f1bd45c338d27a1934d | [
"MIT"
] | null | null | null | genes.py | AgamChopra/simulation-in-a-box | 2a346a2fc83d79e542b64f1bd45c338d27a1934d | [
"MIT"
] | null | null | null | genes.py | AgamChopra/simulation-in-a-box | 2a346a2fc83d79e542b64f1bd45c338d27a1934d | [
"MIT"
] | null | null | null | import random
global_mutation = 0.002
encodings = {'0': '00000', '1': '00001', '2': '00010', '3': '00011', '4': '00100',
'5': '00101', 'A': '00110', 'B': '00111', 'C': '01000', 'D': '01001',
'E': '01010', 'F': '01011', 'G': '01100', 'H': '01101', 'I': '01110',
'J': '01111', 'K': '10000', 'L': '10001', 'M': '10010', 'N': '10011',
'O': '10100', 'P': '10101', 'Q': '10110', 'R': '10111', 'S': '11000',
'T': '11001', 'U': '11010', 'V': '11011', 'W': '11100', 'X': '11101',
'Y': '11110', 'Z': '11111'}
decodings = dict([(value, key) for key, value in encodings.items()])
def untangle(gene):
    """Expand a tangled gene string into its binary sequence (5 bits per symbol)."""
    return ''.join(encodings[symbol] for symbol in gene)
def mutate(sequence):
    """Return *sequence* with at most one bit flipped (probability global_mutation)."""
    bits = list(sequence)
    if random.random() < global_mutation:
        position = random.randrange(0, len(bits))
        bits[position] = '0' if bits[position] == '1' else '1'
    return ''.join(bits)
def tangle(sequence):
    """Compress a binary sequence back into its symbolic gene string."""
    chunks = (sequence[offset:offset + 5] for offset in range(0, len(sequence), 5))
    return ''.join(decodings[chunk] for chunk in chunks)
def bin_to_float(string):
    """Decode a 23-bit fixed-point string: [sign bit][11 integer bits][11 fraction bits]."""
    integer_part = int(string[1:12], 2)
    fraction_part = int(string[12:23], 2) / 2 ** 11
    magnitude = integer_part + fraction_part
    # Sign bit '0' means positive, anything else negative.
    return magnitude if string[0] == '0' else -magnitude
def bin_to_rgb_illum(string):
    """Decode a 25-bit string as (r, g, b, illuminated).

    8 bits per colour channel, final bit is the illumination flag.
    Example: untangled 'ZZZZZ' -> (255, 255, 255, True).
    """
    red = int(string[0:8], 2)
    green = int(string[8:16], 2)
    blue = int(string[16:24], 2)
    illuminated = string[-1] == '1'
    return (red, green, blue, illuminated)
def split_seq(utg):
    """Split a 40-bit untangled gene into its neural-wiring fields.

    Layout: [0] source type (input/hidden), [1:8] source id,
    [8] sink type (output/hidden), [9:16] sink id, [16] recurrence flag,
    [17:] signed fixed-point weight (sign bit + 11.11 bits, see bin_to_float).
    """
    source_type = utg[0]
    source_address = utg[1:8]
    sink_type = utg[8]
    sink_address = utg[9:16]
    recurrence_flag = utg[16]
    weight_bits = utg[17:]
    return source_type, source_address, sink_type, sink_address, recurrence_flag, weight_bits
def main():
    """Demo: repeatedly mutate a wiring gene and a colour gene, printing progress."""
    gene = 'AX0W1ZXX' #<- how gene is stored(in memory) and displayed(to user)
    color = '0X0A0'
    #print(gene, untangle(gene))
    #print(color, untangle(color))
    for i in range(5000):
        utg = untangle(gene) #<- gene is untangled to a binary sequence to be used by the cell
        utg = mutate(utg) #<- during reproduction, there is a small chance(global_mutation factor) that a bit gets flipped in the untangled binary gene sequence.
        gene = tangle(utg) #<- After reproduction, the gene is tangled and stored in memory.
        utg2 = untangle(color)
        utg2 = mutate(utg2)
        color = tangle(utg2)
        # Report decoded weight and colour every 50 generations.
        if((i+1) % 50 == 0):
            #print(gene, untangle(gene))
            source, source_id, sink_type, sink_id, recurrent, weight = split_seq(utg)
            #print(source, source_id, sink_type, sink_id, recurrent, weight)
            print('weight:', bin_to_float(weight))
            #print(color,untangle(color))
            print('color:',bin_to_rgb_illum(untangle(color)))
#%%
if __name__ == '__main__':
    # Run the mutation demo only when executed as a script.
    main()
# Neuron gene -> 40 bits
# Color gene -> 24 bits (r,g,b) + 1 unused bit
# Vital gene (Max dv, Max Energy, Current Energy, Food Type(Self/Generator(Photo or Thermal),Predator,Scavenger,Paracitic,Combination), Preferred food color(only if cell is predator or both),
# reproduction style(uni,2 parent, both), ...) | 42.692308 | 193 | 0.568597 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,689 | 0.434749 |
98262718730e2b662ae1afeb5b66ed50c8d798c1 | 4,543 | py | Python | src/apetest/cmdline.py | boxingbeetle/apetest | c6dd7aaca014c64eec4bde7e755c4a3dec72404a | [
"BSD-3-Clause"
] | 6 | 2019-04-01T09:42:31.000Z | 2020-05-20T15:23:17.000Z | src/apetest/cmdline.py | boxingbeetle/apetest | c6dd7aaca014c64eec4bde7e755c4a3dec72404a | [
"BSD-3-Clause"
] | 31 | 2019-02-04T11:38:32.000Z | 2022-03-03T02:51:15.000Z | src/apetest/cmdline.py | boxingbeetle/apetest | c6dd7aaca014c64eec4bde7e755c4a3dec72404a | [
"BSD-3-Clause"
] | null | null | null | # SPDX-License-Identifier: BSD-3-Clause
"""Command line interface."""
from argparse import ArgumentParser
from os import getcwd
from typing import List
from urllib.parse import urljoin, urlparse
import logging
from apetest.checker import Accept, PageChecker
from apetest.plugin import (
Plugin,
PluginCollection,
add_plugin_arguments,
create_plugins,
load_plugins,
)
from apetest.report import Scribe
from apetest.request import Request
from apetest.spider import spider_req
from apetest.version import VERSION_STRING
def detect_url(arg: str) -> str:
    """Attempt to turn a command line argument into a full URL."""
    parsed = urlparse(arg)
    # Already a complete HTTP(S) URL: pass it through verbatim.
    if parsed.scheme in ("http", "https"):
        return arg
    # An absolute file system path.
    if arg.startswith("/"):
        return urljoin("file://", arg)
    # Something like "host:8080" -- a host/port pair without a scheme.
    netloc = urlparse("http://" + arg).netloc
    _host, separator, port = netloc.partition(":")
    if separator and port.isdigit():
        return "http://" + arg
    # Fall back to a path relative to the current working directory.
    return urljoin(f"file://{getcwd()}/", arg)
def run(
    url: str, report_file_name: str, accept: Accept, plugins: PluginCollection
) -> int:
    """
    Runs APE with the given arguments.
    @param url:
        Base URL of the web site or app to check.
    @param report_file_name:
        Path to write the HTML report to.
    @param accept:
        Document types that we tell the server that we accept.
    @param plugins:
        Plugins to use on this run.
    @return:
        0 if successful, non-zero on errors.
    """
    try:
        try:
            first_req = Request.from_url(detect_url(url))
        except ValueError as ex:
            print("Bad URL:", ex)
            return 1
        spider, robots_report = spider_req(first_req)
        base_url = first_req.page_url
        scribe = Scribe(base_url, spider, plugins)
        if robots_report is not None:
            scribe.add_report(robots_report)
        checker = PageChecker(accept, scribe, plugins)
        print(f'Checking "{base_url}" and below...')
        # The spider grows while we iterate: each checked page can contribute
        # new requests via add_requests().
        for request in spider:
            referrers = set(checker.check(request))
            spider.add_requests(request, referrers)
        print("Done checking")
        print(f'Writing report to "{report_file_name}"...')
        # ASCII output with XML character references keeps the report readable
        # regardless of the terminal/file encoding.
        with open(
            report_file_name, "w", encoding="ascii", errors="xmlcharrefreplace"
        ) as out:
            out.write(scribe.present().flatten())
        print("Done reporting")
        scribe.postprocess()
        print("Done post processing")
        return 0
    finally:
        # Always give plugins a chance to release their resources.
        plugins.close()
def main() -> int:
    """
    Parse command line arguments and call L{run} with the results.
    This is the entry point that gets called by the wrapper script.
    """
    # Register core arguments.
    parser = ArgumentParser(
        description="Automated Page Exerciser: "
        "smarter-than-monkey testing for web apps",
        epilog="This is a test tool; do not use on production sites.",
    )
    parser.add_argument("url", metavar="URL|PATH", help="web app/site to check")
    parser.add_argument(
        "--accept",
        type=str,
        choices=("any", "html"),
        default="any",
        help="accept serialization: any (HTML or XHTML; default) or HTML only",
    )
    parser.add_argument(
        "report", metavar="REPORT", help="file to write the HTML report to"
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="count",
        default=0,
        help="increase amount of logging, can be passed multiple times",
    )
    parser.add_argument(
        "-V", "--version", action="version", version=f"APE {VERSION_STRING}"
    )
    # Let plugins register their arguments.
    plugin_modules = tuple(load_plugins())
    for module in plugin_modules:
        add_plugin_arguments(module, parser)
    args = parser.parse_args()
    # -v raises verbosity; anything beyond -vv is treated as DEBUG.
    level_map = {0: logging.WARNING, 1: logging.INFO, 2: logging.DEBUG}
    level = level_map.get(args.verbose, logging.DEBUG)
    logging.basicConfig(level=level, format="%(levelname)s: %(message)s")
    # Instantiate plugins.
    plugin_list: List[Plugin] = []
    for module in plugin_modules:
        try:
            plugin_list += create_plugins(module, args)
        except Exception: # pylint: disable=broad-except
            # NOTE(review): the failure is swallowed here with no message;
            # presumably create_plugins logs the cause itself -- verify.
            return 1
    accept = Accept[args.accept.upper()]
    plugins = PluginCollection(plugin_list)
    return run(args.url, args.report, accept, plugins)
| 29.309677 | 80 | 0.630861 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,576 | 0.346907 |
98263a506a10c5fd1066a4f40f9a20f4baf4d255 | 3,995 | py | Python | rplugin/python3/denite/source/func.py | delphinus/npm.nvim | 84d72993e9660c68d1c6846d7f6e839a95df325d | [
"MIT"
] | 19 | 2018-03-28T11:44:37.000Z | 2022-03-16T17:49:05.000Z | rplugin/python3/denite/source/func.py | delphinus/npm.nvim | 84d72993e9660c68d1c6846d7f6e839a95df325d | [
"MIT"
] | 2 | 2018-10-21T10:11:56.000Z | 2018-10-21T10:28:12.000Z | rplugin/python3/denite/source/func.py | delphinus/npm.nvim | 84d72993e9660c68d1c6846d7f6e839a95df325d | [
"MIT"
] | 4 | 2018-10-21T09:58:12.000Z | 2021-12-24T02:12:52.000Z | # ============================================================================
# FILE: func.py
# AUTHOR: Qiming Zhao <chemzqm@gmail.com>
# License: MIT license
# ============================================================================
# pylint: disable=E0401,C0411
import os
import subprocess
from denite import util
from .base import Base
def _find_root(path):
    """Walk upward from *path*; return the first directory containing a
    readable package.json, or None once the root '/' or a mount point is hit."""
    current = path
    while current != '/' and not os.path.ismount(current):
        manifest = os.path.join(current, 'package.json')
        if os.access(manifest, os.R_OK):
            return current
        current = os.path.dirname(current)
    return None
def run_command(commands, cwd, stdin=None):
    """Run *commands* in *cwd*; return combined stdout/stderr split into lines.

    Returns an empty list when the command cannot be executed at all
    (e.g. the executable is missing or *cwd* is invalid).
    """
    try:
        p = subprocess.run(commands,
                           cwd=cwd,
                           input=stdin,
                           encoding="utf8",
                           stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
    except (OSError, subprocess.SubprocessError):
        # subprocess.run() without check=True never raises
        # CalledProcessError, so the original handler was dead code; what
        # actually escapes is OSError (missing executable, bad cwd).
        # Swallow those and report "no output" instead of crashing.
        return []
    return p.stdout.split('\n')
class Source(Base):
    """Denite source 'func': lists functions parsed from JavaScript sources
    via the external command configured in g:npm_parsefunc_command."""

    def __init__(self, vim):
        super().__init__(vim)
        self.name = 'func'
        self.kind = 'file'    # candidates open as file locations

    def on_init(self, context):
        # Resolve the npm project root (directory holding package.json)
        # for the current working directory.
        cwd = os.path.normpath(self.vim.call('getcwd'))
        context['__root'] = _find_root(cwd)

    def highlight(self):
        # Link the custom syntax groups to standard highlight groups.
        self.vim.command('highlight default link deniteSource_funcFile Comment')
        self.vim.command('highlight default link deniteSource_funcLinenr LineNR')

    def define_syntax(self):
        # Candidate lines look like "file: linenr: name"; match the pieces
        # and conceal the separating colons.
        self.vim.command(r'syntax match deniteSource_funcHeader /^.*$/ '
                         r'containedin=' + self.syntax_name)
        self.vim.command(r'syntax match deniteSource_funcFile /[^:]*: / '
                         r'contained containedin=deniteSource_funcHeader')
        self.vim.command(r'syntax match deniteSource_funcLinenr /\s*\d\+: / '
                         r'contained containedin=deniteSource_funcHeader')
        self.vim.command(r'syntax match deniteSource_funcSeparator /:/ conceal '
                         r'contained containedin=deniteSource_funcHeader')

    def gather_candidates(self, context):
        if not context['__root']:
            util.error(self.vim, 'package.json not found')
            return []
        root = context['__root']
        args = dict(enumerate(context['args']))
        # First source argument selects the parse mode; see branches below.
        t = args.get(0, '')
        cmds = [self.vim.eval('g:npm_parsefunc_command')]
        curpath = os.path.normpath(self.vim.call('expand', '%:p'))
        relpath = os.path.relpath(curpath, root)
        if t == 't':
            cmds += ['-m', 'this']
        elif t == 'm':
            name = args.get(1, '')
            if name:
                cmds += ['-m', name]
            else:
                cmds += ['-a']
        elif t == 'r':
            cmds += ['-r', relpath]
        elif t == 'e':
            cmds += ['-m', relpath]
        else:
            cmds += [relpath]
        candidates = []
        # With no mode argument, feed the current buffer on stdin instead of
        # letting the parser read the file from disk.
        if not len(t):
            stdin = "\n".join(self.vim.call('getline', 1, '$'))
            lines = run_command(cmds, root, stdin)
        else:
            lines = run_command(cmds, root)
        for line in lines:
            if not line:
                continue
            # NOTE(review): parser output is assumed to be "file:linenr:name";
            # verify against the g:npm_parsefunc_command implementation.
            parts = line.split(':')
            filepath = relpath if parts[0] == 'stdin' else parts[0]
            if parts[0] == 'stdin':
                actionpath = curpath
                abbr = os.path.basename(curpath)
            elif os.path.isabs(parts[0]):
                actionpath = parts[0]
                abbr = os.path.relpath(filepath, os.path.join(root, 'node_modules'))
            else:
                actionpath = os.path.join(root, parts[0])
                abbr = os.path.relpath(actionpath, root)
            candidates.append({
                'word': parts[2],
                'abbr': '%s: %s: %s' % (abbr, parts[1], parts[2]),
                'action__path': actionpath,
                'action__line': parts[1],
                })
        return candidates
| 35.990991 | 84 | 0.501627 | 2,988 | 0.747935 | 0 | 0 | 0 | 0 | 0 | 0 | 1,013 | 0.253567 |
98264fc762074b4a2f1ac32520efa38012b03b13 | 11,835 | py | Python | SoccerPlanner/app/forms.py | guestnone/SoccerPlanner | 8ca6374bbdd2fa71ef4fecfb8aaa02a23d59a4d5 | [
"MIT"
] | null | null | null | SoccerPlanner/app/forms.py | guestnone/SoccerPlanner | 8ca6374bbdd2fa71ef4fecfb8aaa02a23d59a4d5 | [
"MIT"
] | 2 | 2019-06-04T15:21:26.000Z | 2019-06-27T18:14:25.000Z | SoccerPlanner/app/forms.py | guestnone/SoccerPlanner | 8ca6374bbdd2fa71ef4fecfb8aaa02a23d59a4d5 | [
"MIT"
] | 3 | 2019-04-16T08:39:31.000Z | 2019-05-07T09:05:26.000Z | """
Definition of forms.
"""
from django import forms
from django.forms import ModelForm, DateInput, TextInput
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth.models import User
from app.models import (
Player,
TeamSquad,
Team,
Stage,
Match,
Event,
Tournament,
ShootersMatch,
)
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import UpdateView
from snowpenguin.django.recaptcha2.fields import ReCaptchaField
from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
class BootstrapAuthenticationForm(AuthenticationForm):
    """Authentication form which uses Bootstrap CSS."""

    # "form-control" applies the Bootstrap input styling.
    username = forms.CharField(
        max_length=254,
        widget=forms.TextInput({"class": "form-control", "placeholder": "User name"}),
    )
    password = forms.CharField(
        label=_("Password"),
        widget=forms.PasswordInput(
            {"class": "form-control", "placeholder": "Password"}
        ),
    )
class SignUpForm(UserCreationForm):
    """Registration form: UserCreationForm plus optional names and required email."""
    first_name = forms.CharField(max_length=30, required=False, help_text="Optional.")
    last_name = forms.CharField(max_length=30, required=False, help_text="Optional.")
    email = forms.EmailField(
        max_length=254, help_text="Required. Inform a valid email address."
    )
    class Meta:
        model = User
        fields = (
            "username",
            "first_name",
            "last_name",
            "email",
            "password1",
            "password2",
        )
class EventForm(ModelForm):
    """Create/edit form for Event with HTML5 datetime-local pickers."""
    class Meta:
        model = Event
        # datetime-local is a HTML5 input type, format to make date time show on fields
        widgets = {
            "start_time": DateInput(
                attrs={"type": "datetime-local"}, format="%Y-%m-%dT%H:%M"
            ),
            "end_time": DateInput(
                attrs={"type": "datetime-local"}, format="%Y-%m-%dT%H:%M"
            ),
        }
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        super(EventForm, self).__init__(*args, **kwargs)
        # input_formats parses HTML5 datetime-local input to datetime field
        self.fields["start_time"].input_formats = ("%Y-%m-%dT%H:%M",)
        self.fields["end_time"].input_formats = ("%Y-%m-%dT%H:%M",)
class StageForm(ModelForm):
    """Create form for a Stage: a name plus one associated match."""
    name = forms.CharField(max_length=20, required=True)
    listOfMatches = forms.ModelChoiceField(
        queryset=Match.objects.all(), required=False, label="Match ", empty_label=None
    )
    class Meta:
        model = Stage
        # NOTE(review): this Meta attribute is not a standard ModelForm
        # option and looks unused -- verify whether it can be removed.
        listOfMatches = [Match]
        fields = ("name", "listOfMatches")
class MyModelChoiceField(forms.ModelChoiceField):
    """ModelChoiceField whose option label shows a stage's name followed by
    the names of both teams of its match (obj.listOfMatches.team1/team2)."""
    def label_from_instance(self, obj):
        return (
            obj.name
            + " "
            + obj.listOfMatches.team1.name
            + " "
            + obj.listOfMatches.team2.name
        )
class StageEditForm(ModelForm):
    """Edit form for a Stage: pick an existing stage, rename it, reassign its match."""
    # Uses MyModelChoiceField so each option shows stage + team names.
    listOfStages = MyModelChoiceField(
        queryset=Stage.objects.all(), label="Stage ", required=True, empty_label=None
    )
    listOfMatches = forms.ModelChoiceField(
        queryset=Match.objects.all(), required=True, label="Match ", empty_label=None
    )
    class Meta:
        model = Stage
        fields = ("listOfStages", "name", "listOfMatches")
class TeamSquadForm(ModelForm):
    """Create form for a TeamSquad: a name plus a checkbox list of players."""
    name = forms.CharField(max_length=20, required=True, help_text="Required.")
    playerID = forms.ModelMultipleChoiceField(
        queryset=Player.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        label="Player ",
        required=True,
    )
    class Meta:
        model = TeamSquad
        fields = ("name", "playerID")
class TeamForm(ModelForm):
    """Create form for a Team: name, country and the squad it fields."""
    name = forms.CharField(max_length=20, required=True, help_text="Required.")
    country = forms.CharField(max_length=20, required=True, help_text="Required.")
    squad = forms.ModelChoiceField(
        queryset=TeamSquad.objects.all(), required=True, empty_label="(Nothing)"
    )
    class Meta:
        model = Team
        fields = ("name", "country", "squad")
class TeamSquadEditForm(ModelForm):
    """Edit form for a TeamSquad: select an existing squad, then rename it
    and adjust its player membership."""
    listOfSquads = forms.ModelChoiceField(
        queryset=TeamSquad.objects.all(),
        label="Squad ",
        required=True,
        empty_label="(Nothing)",
    )
    playerID = forms.ModelMultipleChoiceField(
        queryset=Player.objects.all(),
        widget=forms.CheckboxSelectMultiple,
        label="Player ",
        required=True,
    )
    class Meta:
        model = TeamSquad
        fields = ("listOfSquads", "name", "playerID")
class TeamEditForm(ModelForm):
    """Edit form for a Team: pick the team to edit, then its attributes."""
    listOfTeams = forms.ModelChoiceField(
        queryset=Team.objects.all(),
        label="Team ",
        required=True,
        empty_label="(Nothing)",
    )
    squad = forms.ModelChoiceField(
        queryset=TeamSquad.objects.all(), required=True, empty_label="(Nothing)"
    )
    class Meta:
        model = Team
        # "name" and "country" are auto-generated from the model.
        fields = ("listOfTeams", "name", "country", "squad")
class TeamSquadDeleteForm(ModelForm):
    """Deletion form: select the TeamSquad to remove."""
    listOfSquads = forms.ModelChoiceField(
        queryset=TeamSquad.objects.all(),
        label="Squad ",
        required=True,
        empty_label="(Nothing)",
    )
    class Meta:
        model = TeamSquad
        fields = ("listOfSquads",)
class TeamDeleteForm(ModelForm):
    """Deletion form: select the Team to remove."""
    listOfTeams = forms.ModelChoiceField(
        queryset=Team.objects.all(),
        label="Team ",
        required=True,
        empty_label="(Nothing)",
    )
    class Meta:
        model = Team
        fields = ("listOfTeams",)
class TournamentForm(ModelForm):
    """Creation form for a Tournament; the date fields render as HTML5
    datetime-local inputs and are parsed back with the matching format."""
    class Meta:
        model = Tournament
        widgets = {
            "startingDate": DateInput(
                attrs={"type": "datetime-local"}, format="%Y-%m-%dT%H:%M"
            ),
            "endingDate": DateInput(
                attrs={"type": "datetime-local"}, format="%Y-%m-%dT%H:%M"
            ),
        }
        fields = "__all__"
    def __init__(self, *args, **kwargs):
        super(TournamentForm, self).__init__(*args, **kwargs)
        # input_formats parses HTML5 datetime-local input to datetime field
        self.fields["startingDate"].input_formats = ("%Y-%m-%dT%H:%M",)
        self.fields["endingDate"].input_formats = ("%Y-%m-%dT%H:%M",)
class TournamentEditForm(ModelForm):
    """Edit form for a Tournament: pick the tournament, then its stage,
    dates, winner and state. Dates use HTML5 datetime-local widgets."""
    listOfTournaments = forms.ModelChoiceField(
        queryset=Tournament.objects.all(),
        label="Tournament ",
        required=True,
        empty_label="(Nothing)",
    )
    stage = forms.ModelChoiceField(queryset=Stage.objects.all(), label="Stage ")
    winner = forms.ModelChoiceField(queryset=Team.objects.all(), label="Team ")
    class Meta:
        model = Tournament
        widgets = {
            "startingDate": DateInput(
                attrs={"type": "datetime-local"}, format="%Y-%m-%dT%H:%M"
            ),
            "endingDate": DateInput(
                attrs={"type": "datetime-local"}, format="%Y-%m-%dT%H:%M"
            ),
        }
        # "name", "startingDate", "endingDate" and "stateChoice" are
        # auto-generated from the model.
        fields = (
            "listOfTournaments",
            "name",
            "stage",
            "startingDate",
            "endingDate",
            "winner",
            "stateChoice",
        )
    def __init__(self, *args, **kwargs):
        super(TournamentEditForm, self).__init__(*args, **kwargs)
        # input_formats parses HTML5 datetime-local input to datetime field
        self.fields["startingDate"].input_formats = ("%Y-%m-%dT%H:%M",)
        self.fields["endingDate"].input_formats = ("%Y-%m-%dT%H:%M",)
class TournamentDeleteForm(ModelForm):
    """Deletion form: select the Tournament to remove."""
    listOfTournaments = forms.ModelChoiceField(
        queryset=Tournament.objects.all(),
        label="Tournament ",
        required=True,
        empty_label="(Nothing)",
    )
    class Meta:
        model = Tournament
        fields = ("listOfTournaments",)
class CaptchaForm(forms.Form):
    """Standalone reCAPTCHA form mixed into pages that need bot protection."""
    captcha = ReCaptchaField(widget=ReCaptchaWidget())
class StageDeleteForm(ModelForm):
    """Deletion form: select the Stage to remove."""
    # NOTE(review): required=False here is inconsistent with the other
    # *DeleteForm classes (all required=True) -- confirm this is intentional.
    listOfStages = MyModelChoiceField(
        queryset=Stage.objects.all(), label="Stage ", required=False, empty_label=None
    )
    class Meta:
        model = Stage
        fields = ("listOfStages",)
class MatchCreateForm(ModelForm):
    """Creation form for a Match: the two opposing teams and the match date."""
    team1 = forms.ModelChoiceField(
        queryset=Team.objects.all(),
        required=True,
        empty_label="(None)",
        label="Team no.1",
        help_text="Required",
    )
    team2 = forms.ModelChoiceField(
        queryset=Team.objects.all(),
        required=True,
        empty_label="(None)",
        label="Team no.2",
        help_text="Required",
    )
    date = forms.DateField(help_text="e.g. 2019-01-01 (YYYY-MM-DD)")
    class Meta:
        model = Match
        fields = ("team1", "team2", "date")
class PlayerCreateForm(ModelForm):
    """Creation form for a Player: identity, role, birth date and height."""
    name = forms.CharField(
        max_length=20, required=True, label="Name", help_text="Required"
    )
    secondName = forms.CharField(
        max_length=20, required=True, label="Second name", help_text="Required"
    )
    role = forms.CharField(max_length=20, label="Role", required=False)
    birthDate = forms.DateField(help_text="e.g. 2019-01-01 (YYYY-MM-DD)")
    height = forms.IntegerField(label="Height (cm)", required=False, min_value=150)
    class Meta:
        model = Player
        fields = ("name", "secondName", "role", "birthDate", "height")
class MatchEditForm(ModelForm):
    """Edit form for a Match: pick the match, then set its date and score."""
    listOfMatches = forms.ModelChoiceField(
        queryset=Match.objects.all(), label="Match", required=True, empty_label="(None)"
    )
    date = forms.DateField(help_text="e.g. 2019-01-01 (YYYY-MM-DD)")
    points = forms.IntegerField(
        label="Goals scored by team 1", required=True, min_value=0
    )
    points2 = forms.IntegerField(
        label="Goals scored by team 2", required=True, min_value=0
    )
    class Meta:
        model = Match
        fields = ["listOfMatches", "date", "points", "points2"]
class PlayerEditForm(ModelForm):
    """Edit form for a Player: pick the player, then update every attribute
    including the running goal count."""
    listOfPlayers = forms.ModelChoiceField(
        queryset=Player.objects.all(),
        label="Player",
        required=True,
        empty_label="(None)",
    )
    name = forms.CharField(max_length=20, required=True, label="Name")
    secondName = forms.CharField(max_length=20, required=True, label="Second name")
    role = forms.CharField(max_length=20, label="Role")
    birthDate = forms.DateField(help_text="e.g. 2019-01-01 (YYYY-MM-DD)")
    height = forms.IntegerField(label="Height (cm)", min_value=150)
    numberOfGoals = forms.IntegerField(label="Scored Goals", min_value=0)
    class Meta:
        model = Player
        fields = [
            "listOfPlayers",
            "name",
            "secondName",
            "role",
            "birthDate",
            "height",
            "numberOfGoals",
        ]
class PlayerDeleteForm(ModelForm):
    """Deletion form: select the Player to remove."""
    listOfPlayers = forms.ModelChoiceField(
        queryset=Player.objects.all(),
        label="Player",
        required=True,
        empty_label="(None)",
    )
    class Meta:
        model = Player
        fields = ("listOfPlayers",)
class MatchDeleteForm(ModelForm):
    """Deletion form: select the Match to remove."""
    listOfMatches = forms.ModelChoiceField(
        queryset=Match.objects.all(), label="Match", required=True, empty_label="(None)"
    )
    class Meta:
        model = Match
        fields = ("listOfMatches",)
class ShootersForm(ModelForm):
    """Form recording how many goals a given player shot in a given match."""
    listOfMatches = forms.ModelChoiceField(
        queryset=Match.objects.all(), label="Match", required=True, empty_label="(None)"
    )
    listOfPlayers = forms.ModelChoiceField(
        queryset=Player.objects.all(),
        label="Player",
        required=True,
        empty_label="(None)",
    )
    goals = forms.IntegerField(label="Number of shot goals", min_value=1)
    class Meta:
        model = ShootersMatch
        fields = ("listOfMatches", "listOfPlayers", "goals")
| 29.078624 | 88 | 0.610224 | 11,165 | 0.943388 | 0 | 0 | 0 | 0 | 0 | 0 | 2,310 | 0.195184 |
98277c0dd3731669035934d65982542e9aceede0 | 3,128 | py | Python | Artificial_Intelligence/botvisible.py | csixteen/HackerRank | 3ef6fa48599341f481b9e266c69df2d449a7b313 | [
"MIT"
] | 4 | 2018-04-19T20:32:54.000Z | 2020-04-21T12:28:00.000Z | Artificial_Intelligence/botvisible.py | csixteen/HackerRank | 3ef6fa48599341f481b9e266c69df2d449a7b313 | [
"MIT"
] | null | null | null | Artificial_Intelligence/botvisible.py | csixteen/HackerRank | 3ef6fa48599341f481b9e266c69df2d449a7b313 | [
"MIT"
] | null | null | null | # coding: -*- utf8 -*-
import os.path
def get_info():
    """Read the persisted bot state from map.txt.

    Returns the whitespace-split first line with fields 1 and 2 converted
    to int (e.g. ["READJUST", row, col]), or None when map.txt is absent.
    """
    if not os.path.exists("map.txt"):
        return None
    with open("map.txt") as state_file:
        fields = state_file.readline().strip().split(" ")
    fields[1] = int(fields[1])
    fields[2] = int(fields[2])
    return fields
:param r: Row of the new object
:param c: Column of the new object
:param o: The new object ('-', 'd', 'o', etc)
:param m: The map
:param info:
:return:
"""
grid = [list(row) for row in m]
grid[r][c] = o
with open("map.txt", "w") as map_file:
map_file.write(" ".join(map(str, info)) + "\n")
map_file.write("\n".join(["".join(row) for row in grid]))
def print_board(b):
print("\n".join(["".join(row) for row in b]))
def readjust(r, c, info):
if r == x:
return "RIGHT" if c > y else "LEFT"
else:
return "UP" if r > x else "DOWN"
def next_move(posr, posc, b):
info = get_info()
# Check if we have to readjust the bot
if info and info[0] == "READJUST":
return readjust(posr, posc, info)
# Is the bot standing on dirt?
if b[posr][posc] == "d":
update_map_file(posr, posc, '-', b, info)
return "CLEAN"
# Check the cells immediately up, down, left or right
if posr > 0 and (b[posr-1][posc] == "d"):
return "UP"
elif posr < 4 and (b[posr+1][posc] == "d"):
return "DOWN"
elif posc > 0 and (b[posr][posc-1] == "d"):
return "LEFT"
elif posc < 4 and (b[posr][posc+1] == "d"):
return "RIGHT"
# Scan the surroundings
for accx in range(1, 6):
for r in range(max(posr-accx, 0), min(posr+accx+1, 5)):
for accy in range(1, 6):
row = b[r][max(posc-accy, 0):min(posc+accy+1, 5)]
# There's poop in the row
if row.count("d") > 0:
# If it's in the row where the bot is
if r == posr:
if b[r].index("d") < posc:
return "LEFT"
else:
return "RIGHT"
# It's in another row
elif r < posr:
return "UP"
else:
return "DOWN"
if __name__ == "__main__":
with open("botlarge.txt") as f:
pos = list(map(int, f.readline().strip().split()))
board = [list(f.readline().strip()) for i in range(5)]
doodoo = 7
moves = 0
newr, newc = pos
while doodoo > 0:
n = next_move(newr, newc, board)
if n == "CLEAN":
doodoo -= 1
board[newr][newc] = '-'
print_board(board)
if n == "UP":
newr -= 1
elif n == "DOWN":
newr += 1
elif n == "LEFT":
newc -= 1
elif n == "RIGHT":
newc += 1
print("Next move: {}".format(n))
moves += 1
print("Moves: {}".format(moves))
| 28.436364 | 65 | 0.453325 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 682 | 0.218031 |
9827c78b55c36213e56e3cab4efb3a255d146830 | 21,718 | py | Python | temboardui/plugins/monitoring/tools.py | pierrehilbert/temboard | d28bbe87329d3023774fe8c773b8b4ad50e09757 | [
"PostgreSQL"
] | null | null | null | temboardui/plugins/monitoring/tools.py | pierrehilbert/temboard | d28bbe87329d3023774fe8c773b8b4ad50e09757 | [
"PostgreSQL"
] | null | null | null | temboardui/plugins/monitoring/tools.py | pierrehilbert/temboard | d28bbe87329d3023774fe8c773b8b4ad50e09757 | [
"PostgreSQL"
] | null | null | null | import logging
from .model.orm import (
Check,
Host,
Instance,
)
from .alerting import (
bootstrap_checks,
check_specs,
)
logger = logging.getLogger(__name__)
def merge_agent_info(session, host_info, instances_info):
"""Update the host, instance and database information with the
data received from the agent."""
try:
# Try to get host_id, based on hostname
host_info['host_id'] = get_host_id(session, host_info['hostname'])
except Exception:
# host not found
pass
host = Host.from_dict(host_info)
# Insert or update host information
session.merge(host)
session.flush()
session.commit()
# Get host_id in any case
host_id = get_host_id(session, host_info['hostname'])
for instance_info in instances_info:
# Only process instances marked as available, since only those
# have complete information
if instance_info['available']:
try:
# Try to get instance_id
instance_info['instance_id'] = get_instance_id(
session, host_id, instance_info['port']
)
except Exception:
# instance not found
pass
instance_info['host_id'] = host_id
inst = Instance.from_dict(instance_info)
# Insert or update instance information
session.merge(inst)
session.flush()
session.commit()
return host
def get_host_id(session, hostname):
"""
Get host_id from the hostname.
"""
query = """
SELECT host_id FROM monitoring.hosts
WHERE hostname = :hostname
"""
result = session.execute(query, {"hostname": hostname})
try:
return result.fetchone()[0]
except Exception:
raise Exception("Can't find host_id for \"%s\""
" in monitoring.hosts table." % hostname)
def get_instance_id(session, host_id, port):
"""
Get instance from host_id and port.
"""
query = """
SELECT instance_id
FROM monitoring.instances
WHERE host_id = :host_id AND port = :port
"""
result = session.execute(query, {"host_id": host_id, "port": port})
try:
return result.fetchone()[0]
except Exception:
raise Exception("Can't find instance_id for \"%s/%s\" "
"in monitoring.instances table." % (host_id, port))
def check_agent_key(session, hostname, pg_data, pg_port, agent_key):
query = """
SELECT agent_key
FROM application.instances
WHERE hostname = :hostname AND pg_data=:pgdata AND pg_port = :pgport
LIMIT 1
"""
result = session.execute(
query,
{"hostname": hostname, "pgdata": pg_data, "pgport": pg_port})
try:
row = result.fetchone()
if row[0] == agent_key:
return
except Exception:
raise Exception("Can't find the instance \"%s\" "
"in application.instances table." % hostname)
raise Exception("Can't check agent's key.")
def check_host_key(session, hostname, agent_key):
query = """
SELECT agent_key
FROM application.instances
WHERE hostname = :hostname
"""
result = session.execute(query, {"hostname": hostname})
try:
for row in result.fetchall():
if row[0] == agent_key:
return
except Exception:
raise Exception("Can't find the instance \"%s\" "
"in application.instances table." % hostname)
raise Exception("Can't check agent's key.")
def insert_metrics(session, host, agent_data, logger, hostname, port):
try:
# Find host_id & instance_id
host_id = get_host_id(session, hostname)
instance_id = get_instance_id(session, host_id, port)
except Exception as e:
logger.info("Unable to find host & instance IDs")
logger.debug(agent_data)
logger.exception(str(e))
session.rollback()
return
cur = session.connection().connection.cursor()
for metric in agent_data.keys():
# Do not try to insert empty lines
if len(agent_data[metric]) == 0:
continue
try:
# Insert data
if metric == 'sessions':
for metric_data in agent_data['sessions']:
query = """
INSERT INTO monitoring.metric_sessions_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
metric_data['active'],
metric_data['waiting'],
metric_data['idle'],
metric_data['idle_in_xact'],
metric_data['idle_in_xact_aborted'],
metric_data['fastpath'],
metric_data['disabled'],
metric_data['no_priv']
)
)
)
elif metric == 'xacts':
for metric_data in agent_data['xacts']:
query = """
INSERT INTO monitoring.metric_xacts_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
str(metric_data['measure_interval']),
metric_data['n_commit'],
metric_data['n_rollback']
)
)
)
elif metric == 'locks':
for metric_data in agent_data['locks']:
query = """
INSERT INTO monitoring.metric_locks_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
metric_data['access_share'],
metric_data['row_share'],
metric_data['row_exclusive'],
metric_data['share_update_exclusive'],
metric_data['share'],
metric_data['share_row_exclusive'],
metric_data['exclusive'],
metric_data['access_exclusive'],
metric_data['siread'],
metric_data['waiting_access_share'],
metric_data['waiting_row_share'],
metric_data['waiting_row_exclusive'],
metric_data['waiting_share_update_exclusive'],
metric_data['waiting_share'],
metric_data['waiting_share_row_exclusive'],
metric_data['waiting_exclusive'],
metric_data['waiting_access_exclusive']
)
)
)
elif metric == 'blocks':
for metric_data in agent_data['blocks']:
query = """
INSERT INTO monitoring.metric_blocks_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
str(metric_data['measure_interval']),
metric_data['blks_read'],
metric_data['blks_hit'],
metric_data['hitmiss_ratio']
)
)
)
elif metric == 'bgwriter':
for metric_data in agent_data['bgwriter']:
query = """
INSERT INTO monitoring.metric_bgwriter_current
VALUES (%s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
(
None,
str(metric_data['measure_interval']),
metric_data['checkpoints_timed'],
metric_data['checkpoints_req'],
metric_data['checkpoint_write_time'],
metric_data['checkpoint_sync_time'],
metric_data['buffers_checkpoint'],
metric_data['buffers_clean'],
metric_data['maxwritten_clean'],
metric_data['buffers_backend'],
metric_data['buffers_backend_fsync'],
metric_data['buffers_alloc'],
metric_data['stats_reset']
)
)
)
elif metric == 'db_size':
for metric_data in agent_data['db_size']:
query = """
INSERT INTO monitoring.metric_db_size_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
metric_data['size']
)
)
)
elif metric == 'tblspc_size':
for metric_data in agent_data['tblspc_size']:
query = """
INSERT INTO monitoring.metric_tblspc_size_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['spcname'],
(
None,
metric_data['size']
)
)
)
elif metric == 'filesystems_size':
for metric_data in agent_data['filesystems_size']:
query = """
INSERT INTO monitoring.metric_filesystems_size_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
host_id,
metric_data['mount_point'],
(
None,
metric_data['used'],
metric_data['total'],
metric_data['device']
)
)
)
elif metric == 'temp_files_size_tblspc':
for metric_data in agent_data['temp_files_size_tblspc']:
query = """
INSERT INTO
monitoring.metric_temp_files_size_tblspc_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['spcname'],
(
None,
metric_data['size']
)
)
)
elif metric == 'temp_files_size_db':
for metric_data in agent_data['temp_files_size_db']:
query = """
INSERT INTO
monitoring.metric_temp_files_size_db_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
metric_data['size']
)
)
)
elif metric == 'wal_files':
for metric_data in agent_data['wal_files']:
query = """
INSERT INTO monitoring.metric_wal_files_current
VALUES (%s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
(
None,
str(metric_data['measure_interval']),
metric_data['written_size'],
metric_data['current_location'],
metric_data['total'],
metric_data['archive_ready'],
metric_data['total_size']
)
)
)
elif metric == 'cpu':
for metric_data in agent_data['cpu']:
query = """
INSERT INTO monitoring.metric_cpu_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
host_id,
metric_data['cpu'],
(
None,
str(metric_data['measure_interval']),
metric_data['time_user'],
metric_data['time_system'],
metric_data['time_idle'],
metric_data['time_iowait'],
metric_data['time_steal']
)
)
)
elif metric == 'process':
for metric_data in agent_data['process']:
query = """
INSERT INTO monitoring.metric_process_current
VALUES (%s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
host_id,
(
None,
str(metric_data['measure_interval']),
metric_data['context_switches'],
metric_data['forks'],
metric_data['procs_running'],
metric_data['procs_blocked'],
metric_data['procs_total']
)
)
)
elif metric == 'memory':
for metric_data in agent_data['memory']:
query = """
INSERT INTO monitoring.metric_memory_current
VALUES (%s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
host_id,
(
None,
metric_data['mem_total'],
metric_data['mem_used'],
metric_data['mem_free'],
metric_data['mem_buffers'],
metric_data['mem_cached'],
metric_data['swap_total'],
metric_data['swap_used']
)
)
)
elif metric == 'loadavg':
for metric_data in agent_data['loadavg']:
query = """
INSERT INTO monitoring.metric_loadavg_current
VALUES (%s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
host_id,
(
None,
metric_data['load1'],
metric_data['load5'],
metric_data['load15']
)
)
)
elif metric == 'vacuum_analyze':
for metric_data in agent_data['vacuum_analyze']:
query = """
INSERT INTO monitoring.metric_vacuum_analyze_current
VALUES (%s, %s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
metric_data['dbname'],
(
None,
str(metric_data['measure_interval']),
metric_data['n_vacuum'],
metric_data['n_analyze'],
metric_data['n_autovacuum'],
metric_data['n_autoanalyze']
)
)
)
elif metric == 'replication':
for metric_data in agent_data['replication']:
query = """
INSERT INTO monitoring.metric_replication_current
VALUES (%s, %s, %s)
"""
cur.execute(
query,
(
metric_data['datetime'],
instance_id,
(
None,
metric_data['receive_location'],
metric_data['replay_location']
)
)
)
session.connection().connection.commit()
except Exception as e:
logger.info("Metric data not inserted for '%s' type" % (metric))
logger.debug(agent_data[metric])
logger.exception(str(e))
session.connection().connection.rollback()
def get_host_checks(session, host_id):
# Returns enabled alerting checks as list of tuples:
# (name, warning threshold, critical threshold)
checks = session.query(Check).filter(Check.host_id == host_id)
return [(c.name, c.warning, c.critical)
for c in checks if c.enabled]
def populate_host_checks(session, host_id, instance_id, hostinfo):
# Populate checks table with bootstraped checks if needed
q = session.query(Check)
n = q.filter(Check.host_id == host_id).count()
if n != 0:
return
specs = check_specs
for bc in bootstrap_checks(hostinfo):
c = Check(host_id=host_id,
instance_id=instance_id,
name=bc[0],
enabled=True,
warning=bc[1],
critical=bc[2],
description=specs.get(bc[0]).get('description'))
session.add(c)
session.commit()
| 39.061151 | 78 | 0.38056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 6,140 | 0.282715 |
9827d15b9ed4fc60f071b968ac243471d2ecbda1 | 6,202 | py | Python | coordFromRsid.py | bnwolford/FHiGR_score | b76a4aa126830d08df3241674f8017d989ecadbd | [
"MIT"
] | null | null | null | coordFromRsid.py | bnwolford/FHiGR_score | b76a4aa126830d08df3241674f8017d989ecadbd | [
"MIT"
] | null | null | null | coordFromRsid.py | bnwolford/FHiGR_score | b76a4aa126830d08df3241674f8017d989ecadbd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#===============================================================================
# Copyright (c) 2020 Brooke Wolford
# Lab of Dr. Cristen Willer and Dr. Mike Boehnke
# University of Michigan
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
#SOFTWARE.
#============================================================================
############################
##### IMPORT MODULES #######
###########################
import os
import sys
import sqlite3
import rsidx
import argparse
import gzip
###########################
##### PARSE ARGUMENTS ####
###########################
def get_settings():
    """Parse and return the command-line arguments."""
    cli = argparse.ArgumentParser(
        description="Convert weights files with just rsIDs to ones with chromosomes and coordinates too\n")
    cli.add_argument("-v", "--vcf", type=str, required=True,
                     help="bgzipped and tabixed vcf file")
    cli.add_argument("-r", "--rsidx", type=str,
                     help="rsidx file")
    cli.add_argument("-w", "--weights", type=str, required=True,
                     help="weights file. assumes one header. will skip lines with #")
    cli.add_argument("-c", "--col", type=int, default=0,
                     help="0-based column in weights file that rsID is in")
    cli.add_argument("-p", "--prefix", type=str, required=True,
                     help="Prefix for output file, including path")
    return cli.parse_args()
###############################
######## SUB ROUTINES #########
###############################
def open_zip(f):
    """Open a (possibly gzipped) file for reading in text mode.

    Files whose name contains ".gz" are opened with gzip; "-" yields
    sys.stdin; anything else is opened as plain text. A progress message
    is printed to stderr for real files.

    Bug fix: the "-" branch called ``sys.stdin()`` -- sys.stdin is a file
    object, not a callable, so that branch always raised TypeError. It now
    returns sys.stdin itself.
    """
    if ".gz" in f:
        handle = gzip.open(f, "rt")
        print("Opening gzipped file %s\n" % f, file=sys.stderr)
    elif f == "-":
        handle = sys.stdin
    else:
        handle = open(f, "rt")
        print("Opening file %s\n" % f, file=sys.stderr)
    return handle
def index(vcf):
    """Build an rsidx SQLite index for *vcf*.

    NOTE(review): the index path 'myidx.db' is hard-coded here while
    main() passes a user-supplied --rsidx path to search() -- confirm
    which index file is intended before enabling the index() call.
    """
    with sqlite3.connect('myidx.db') as dbconn, open(vcf, 'r') as vcffh:
        rsidx.index.index(dbconn, vcffh)
    # rsidx index 00-All.vcf.gz 00-All.vcf.rsidx
def search(rsidlist,vcf,index):
    """Query *rsidlist* against *vcf* through the rsidx *index*.

    Returns a dict mapping rsID -> [chrom, pos, rsID, ref, alt] for every
    marker found, and reports to stderr how many markers were missing.
    """
    print("Querying markers from weights file in VCF\n",file=sys.stderr)
    in_len=len(rsidlist)
    rsid_dict={}
    with sqlite3.connect(index) as dbconn:
        for line in rsidx.search.search(rsidlist, dbconn, vcf):
            ls=line.rstrip()
            lineList=ls.split("\t")
            rsid_dict[lineList[2]]=lineList[:5] #assumes VCF is chr, pos, rsID, REF, ALT
    out_len=len(rsid_dict.keys())
    if in_len!=out_len:
        diff=int(in_len)-int(out_len)
        print("Not all rsIDs from weights file could be found in the VCF. Missing %d of %d\n" % (diff,in_len),file=sys.stderr)
    else:
        print("All %d rsIDs from weights file could be found in the VCF.\n" % in_len,file=sys.stderr)
    return rsid_dict
def rsid_from_weights(weights, col):
    """Collect rsIDs from column *col* of the weights file.

    Lines starting with '#' are ignored; the first remaining line is
    treated as a header and skipped.

    NOTE(review): a completely blank line would make ls[0] raise
    IndexError -- confirm weights files never contain blank lines.
    """
    print("Getting rsIDs from weights file %s\n" %weights, file=sys.stderr)
    command = open_zip(weights)
    rsid_list = []
    header_count = 0
    with command as f:
        for line in f:
            ls = line.rstrip()
            if ls[0] != "#":
                if header_count == 0:
                    # Fix: the original evaluated the bare builtin ``next``
                    # (a no-op expression) where ``continue`` was intended;
                    # behavior is unchanged since the else branch already
                    # skips the append for this header line.
                    header_count += 1
                    continue
                else:
                    lineList = ls.split()
                    rsid_list.append(lineList[col])
    return rsid_list
def merge(weights,col,rsid_dict,prefix):
    """Write a new weights file with CHR/POS/REF/ALT columns appended.

    '#' lines are copied verbatim; the first other line gets the extended
    header; each data line is joined with its VCF lookup (one output line
    per ALT allele), or NA columns when the rsID is absent from the VCF.
    The output <prefix>_reformat.txt is gzipped in place and 0 is returned.
    """
    command=open_zip(weights)
    header_count=0
    output=prefix + "_reformat.txt"
    print("Writing new weights file %s\n" %output, file=sys.stderr)
    with open(output,"w") as o:
        with command as f:
            for line in f:
                ls=line.rstrip()
                if ls[0] == "#":
                    o.write(ls+"\n")
                elif header_count==0:
                    lineList=ls.split()
                    o.write("\t".join(lineList+["CHR","POS","REF","ALT"])+"\n") #write header
                    header_count+=1
                else:
                    lineList=ls.split()
                    try:
                        from_vcf=rsid_dict[lineList[col]] #look up from dictionary from vcf
                        ## handle occurence of multiple alt alleles by printing each potential entry as a newline
                        for alt_allele in from_vcf[4].split(","):
                            o.write("\t".join(lineList+from_vcf[0:2]+[from_vcf[3]]+[alt_allele])+"\n")
                    except KeyError:
                        o.write("\t".join(lineList+["NA"]*4)+"\n") #rsid not in VCF
        f.close()
        o.close()
    os.system("gzip "+output) # system call to gzip
    return 0
#########################
########## MAIN #########
#########################
def main():
    """Entry point: parse args, check inputs exist, look up coordinates
    for every rsID in the weights file and write the reformatted output."""
    #get arguments
    args = get_settings()
    print(args)
    #github package https://github.com/bioforensics/rsidx
    if not os.path.exists(args.vcf):
        sys.exit("VCF does not exist\n")
    # NOTE(review): --rsidx is optional in get_settings(), so args.rsidx may
    # be None here and os.path.exists(None) raises TypeError -- consider
    # making -r required.
    if not os.path.exists(args.rsidx):
        sys.exit("RSIDX does not exist\n")
    #index(args.vcf)
    #get rsids from weights file
    rsid_list=rsid_from_weights(args.weights,args.col)
    #search vcf
    rsid_dict=search(rsid_list,args.vcf,args.rsidx)
    #merge new info with weights file
    merge(args.weights,args.col,rsid_dict,args.prefix)
#call main
if __name__ == "__main__":
main()
111
| 37.137725 | 136 | 0.589971 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,924 | 0.471461 |
982874d77b210ce496dff9011612aad7e92e965a | 215 | py | Python | vnpy/pricing/crrCython/setup.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 5 | 2019-01-17T12:14:14.000Z | 2021-05-30T10:24:42.000Z | vnpy/pricing/crrCython/setup.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 1 | 2018-06-12T10:08:24.000Z | 2018-06-12T10:08:24.000Z | vnpy/pricing/crrCython/setup.py | black0144/vnpy | 0d0ea30dad14a0150f7500ff9a62528030321426 | [
"MIT"
] | 5 | 2019-03-26T03:17:45.000Z | 2019-11-05T08:08:18.000Z | # encoding: UTF-8
from distutils.core import setup
from Cython.Build import cythonize
import numpy
# Build the crrCython extension module from its .pyx source; NumPy headers
# are required because the extension uses the NumPy C API.
setup(
    name = 'crrCython',
    ext_modules = cythonize("crrCython.pyx"),
    include_dirs = [numpy.get_include()]
)
| 17.916667 | 43 | 0.739535 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.2 |
9829af145c0226598d0d6a57a52d430b4f3cca1d | 5,700 | py | Python | popularity_sequence_prediction/callbacks.py | JennyXieJiayi/TSMMVED | c54f5f3c264636cb7ea77802ae02b681664be6f4 | [
"Apache-2.0"
] | 7 | 2021-05-18T08:25:48.000Z | 2022-02-09T11:44:20.000Z | popularity_sequence_prediction/callbacks.py | JennyXieJiayi/TSMMVED | c54f5f3c264636cb7ea77802ae02b681664be6f4 | [
"Apache-2.0"
] | null | null | null | popularity_sequence_prediction/callbacks.py | JennyXieJiayi/TSMMVED | c54f5f3c264636cb7ea77802ae02b681664be6f4 | [
"Apache-2.0"
] | 2 | 2021-05-21T16:05:22.000Z | 2021-05-21T16:30:48.000Z | '''
Copyright (c) 2021. IIP Lab, Wuhan University
'''
import os
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras import models
from tensorflow.keras import layers
from tensorflow.keras import callbacks
from data import *
from layers import ProductOfExpertGaussian as POE
def _name_var_dict():
name_var_dict = {
"lr" : "self.model.optimizer.lr",
"kl_gauss" : "self.model.sampler.gauss_loss.lamb_kl",
}
return name_var_dict
class AnnealEveryEpoch(callbacks.Callback):
    '''
    Anneal parameters according to some fixed
    schedule every time an epoch begins.

    name_schedule_dict maps a parameter name (a key of _name_var_dict())
    to a schedule object exposing value(epoch).
    '''
    def __init__(self, name_schedule_dict, **kwargs):
        super(AnnealEveryEpoch, self).__init__(**kwargs)
        self.name_schedule_dict = name_schedule_dict
    def on_train_begin(self, epoch, logs=None):
        # Translate parameter names into eval-able attribute paths on the
        # (now available) self.model.
        name_var_dict = _name_var_dict()
        self.var_schedule_dict = {
            name_var_dict[name]:schedule
            for name, schedule in self.name_schedule_dict.items()
        }
    def on_epoch_begin(self, epoch, logs=None):
        # eval() resolves the stored path (e.g. "self.model.optimizer.lr")
        # to the backend variable, which is then set to the scheduled value.
        for var, schedule in self.var_schedule_dict.items():
            K.set_value(eval(var), schedule.value(epoch))
    def on_epoch_end(self, epoch, logs=None):
        # Print a small table of the current annealed values.
        print(), print("|"+"-"*13+"|"+"-"*10+"|")
        for var, _ in self.var_schedule_dict.items():
            print("|{:^13}|{:^10.5f}|".format(
                eval(var).name, K.get_value(eval(var))
            ))
        print("|"+"-"*13+"|"+"-"*10+"|"), print()
class ValidateRecordandSaveBest(callbacks.Callback):
    '''
    Evaluate model performance on validation set,
    record the training dynamic every epoch and
    save the best model with lowest nMSE or Corr.
    '''
    def __init__(self, val_gen, rec_path, model_root, **kwargs):
        # val_gen: validation batch generator exposing num_videos,
        #   batch_size and timesteps; rec_path: TSV log of per-epoch
        #   metrics; model_root: directory for best_nmse.h5 / best_corr.h5.
        super(ValidateRecordandSaveBest, self).__init__(**kwargs)
        self.val_gen = val_gen
        self.rec_path = rec_path
        self.model_root = model_root
        self.best_nmse = np.inf
        self.best_corr = -np.inf
    def _build_test_model(self):
        """Build a deterministic prediction model that decodes from the
        posterior mean instead of sampling."""
        abst_in = self.model.inputs[-1]
        if self.model.encodertype == "user":
            uid_in = self.model.inputs[0]
            mods_in = self.model.inputs[1]
            uid_emb = self.model.get_layer("uid_emb")(uid_in)
            uid_emb = self.model.get_layer("uid_emb_reshape")(uid_emb)
            concat = layers.Concatenate(axis=-1)([uid_emb, mods_in])
            mean_stds = self.model.encoders[0](concat)
            mean = mean_stds[0]
            input_space = [uid_in] + [mods_in] + [abst_in]
        else:
            # One encoder per modality; their Gaussians are fused with a
            # product of experts and only the fused mean is used.
            uemb_in = self.model.inputs[0]
            mods_in = self.model.inputs[1:-1]
            encoders = self.model.encoders
            mean_stds = [encoder(i) for encoder, i in zip(encoders, mods_in)]
            mean, _ = POE()(mean_stds)
            input_space = [uemb_in] + mods_in + [abst_in]
        ### In validation, use the mode deterministically
        pop_sequence = self.model.decoder([mean, abst_in])
        pred_model = models.Model(inputs=input_space, outputs=pop_sequence)
        return pred_model
    def _pearson_corr(self, preds, truth):
        """Mean per-sample Pearson correlation, skipping NaN correlations.

        NOTE(review): if every per-sample correlation is NaN,
        cnt_samples reaches 0 and this divides by zero -- confirm the
        validation data rules that out.
        """
        corr = 0
        num_samples = len(preds)
        cnt_samples = num_samples
        for i in range(num_samples):
            corr_this = pd.Series(preds[i]).corr(pd.Series(truth[i]))
            if np.isnan(corr_this):
                cnt_samples = cnt_samples-1
                continue
            corr += corr_this
        return corr / cnt_samples
    def _nmse(self, preds, truth):
        # MSE normalized by the variance of the ground truth.
        return np.mean(np.square(preds - truth)) / (truth.std()**2)
    def on_train_begin(self, epoch, logs=None):
        # Write the log header (file is opened in append mode).
        with open(self.rec_path, "a") as f:
            f.write("nmse\tcorr\n")
    def on_epoch_end(self, epoch, logs=None):
        """Predict the whole validation set, log nMSE/corr and checkpoint
        whichever metric improved."""
        pred_model = self._build_test_model()
        num_videos = self.val_gen.num_videos
        batch_size = self.val_gen.batch_size
        timesteps = self.val_gen.timesteps
        preds = np.empty([num_videos, timesteps], dtype=np.float32)
        truth = np.empty([num_videos, timesteps], dtype=np.float32)
        for i, [features, targets] in enumerate(self.val_gen):
            preds_batch = np.squeeze(pred_model.predict(features))
            targets_batch = np.squeeze(targets)
            preds[i*batch_size:(i+1)*batch_size] = preds_batch
            truth[i*batch_size:(i+1)*batch_size] = targets_batch
        nmse = self._nmse(preds, truth)
        corr = self._pearson_corr(preds, truth)
        with open(self.rec_path, "a") as f:
            ### Record the training dynamic
            f.write("{}\t{}\n".format(nmse, corr))
        if nmse < self.best_nmse:
            ### Save the best model for nmse
            self.best_nmse = nmse
            self.model.save(os.path.join(self.model_root, "best_nmse.h5"))
        if corr > self.best_corr:
            ### Save the best model for corr
            self.best_corr = corr
            self.model.save(os.path.join(self.model_root, "best_corr.h5"))
        ### Print out the current validation metrics
        print("-"*10+"validation"+"-"*10)
        print(self.rec_path)
        print("curr nmse: {}; curr corr: {}".format(nmse, corr))
        print("best nmse: {}; best corr: {}".format(self.best_nmse, self.best_corr))
        print("-"*8+"validation End"+"-"*8)
if __name__ == "__main__":
'''
For test purpose ONLY
'''
pass | 36.075949 | 85 | 0.590526 | 5,033 | 0.882982 | 0 | 0 | 0 | 0 | 0 | 0 | 903 | 0.158421 |
982a5a6c85f4198d136ac06c6691b36716f7b587 | 940 | py | Python | sdks/python/test/test_ReleaseUpdateError.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | null | null | null | sdks/python/test/test_ReleaseUpdateError.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 6 | 2019-10-23T06:38:53.000Z | 2022-01-22T07:57:58.000Z | sdks/python/test/test_ReleaseUpdateError.py | Brantone/appcenter-sdks | eeb063ecf79908b6e341fb00196d2cd9dc8f3262 | [
"MIT"
] | 2 | 2019-10-23T06:31:05.000Z | 2021-08-21T17:32:47.000Z | # coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: benedetto.abbenanti@gmail.com
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
from __future__ import absolute_import
import unittest
import appcenter_sdk
from ReleaseUpdateError.clsReleaseUpdateError import ReleaseUpdateError # noqa: E501
from appcenter_sdk.rest import ApiException
class TestReleaseUpdateError(unittest.TestCase):
"""ReleaseUpdateError unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testReleaseUpdateError(self):
"""Test ReleaseUpdateError"""
# FIXME: construct object with mandatory attributes with example values
# model = appcenter_sdk.models.clsReleaseUpdateError.ReleaseUpdateError() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 23.5 | 95 | 0.723404 | 432 | 0.459574 | 0 | 0 | 0 | 0 | 0 | 0 | 493 | 0.524468 |
982acf7f9f2218e64a54202928052ad4200a4b8a | 1,720 | py | Python | main.py | matr095/A-Day-Calculator | 76859f69fe7731f8292ca9830e201643f4420820 | [
"Apache-2.0"
] | null | null | null | main.py | matr095/A-Day-Calculator | 76859f69fe7731f8292ca9830e201643f4420820 | [
"Apache-2.0"
] | null | null | null | main.py | matr095/A-Day-Calculator | 76859f69fe7731f8292ca9830e201643f4420820 | [
"Apache-2.0"
] | null | null | null | from ftplib import FTP
from datetime import *
from tkinter import *
def interval():
now = date.today()
yourDay = int(input("Vous êtes né quel jour ? "))
yourMonth = int(input("Vous êtes né quel mois ? "))
yourYear = int(input("Vous êtes né quelle année ? "))
birthday = date(yourYear, yourMonth, yourDay)
daysPassed = now - birthday
import os
clear = lambda: os.system('clear')
clear()
endOfString = str(daysPassed).index(" ")
print("Tu as " + str(daysPassed)[:endOfString] + " jours !")
def calculate():
global result
global response
try:
result.destroy()
except:
print("no")
now = date.today()
yourDay = int(day.get('1.0', END))
yourMonth = int(month.get('1.0', END))
yourYear = int(year.get('1.0', END))
birthday = date(yourYear, yourMonth, yourDay)
daysPassed = now - birthday
endOfString = str(daysPassed).index(" ")
response = str(daysPassed)[:endOfString]
result = Label(root, text="Félicitations ! Tu as exactement: " + response + " jours !!!")
result.pack()
root = Tk()
root.minsize(width=640, height=480)
root.maxsize(width=640, height=480)
root.wm_title("A Day Calculator")
Label(root, text="A Day Calculator calcule ton nombre de jours passés depuis ta naissance !").pack()
calc = Button(root, text="Calculer", command=calculate)
calc.pack()
Label(root, text="Jour de naissance").pack()
day = Text(root, width="4", height="2", background="gray")
day.pack()
Label(root, text="Mois de naissance").pack()
month = Text(root, width="4", height="2", background="gray")
month.pack()
Label(root, text="Année de naissance").pack()
year = Text(root, width="4", height="2", background="gray")
year.pack()
Label(root, text="-----------------").pack()
root.mainloop()
| 23.243243 | 100 | 0.674419 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 408 | 0.235838 |
982c53384fb7fdf1f9847dabcbbd5e3b70a7ace1 | 5,032 | py | Python | nodes/sunled_action.py | willdickson/virtual_desert | 989e5b9e3f19e1c502795ae5033873365d325d1b | [
"MIT"
] | 1 | 2021-06-23T06:07:56.000Z | 2021-06-23T06:07:56.000Z | nodes/sunled_action.py | willdickson/virtual_desert | 989e5b9e3f19e1c502795ae5033873365d325d1b | [
"MIT"
] | null | null | null | nodes/sunled_action.py | willdickson/virtual_desert | 989e5b9e3f19e1c502795ae5033873365d325d1b | [
"MIT"
] | null | null | null | import math
import rospy
import numpy as np
import random
from base_action import BaseAction
class SunledAction(BaseAction):
index_to_led_position = {}
def __init__(self,init_angle,device,param,trial_index):
print('sunled action __init__')
super(SunledAction,self).__init__(device,param)
self.init_angle = init_angle
self.position = 0
self.last_update_t = None
self.trial_index = trial_index
def update(self,t,angle):
rval_msg = super(SunledAction,self).update(t,angle)
if self.param['mode'] == 'inherit_from':
pass
if self.last_update_t is None:
self.last_update_t = t
dt = t - self.last_update_t
if dt > self.param['update_period']:
if self.param['mode'] == 'fixed_rate':
self.position = dt*self.param['rate'] + self.position
self.position = np.mod(self.position, self.param['number_of_leds'])
self.device.set_led(int(self.position),self.param['rgb_value'])
self.last_update_t = t
return rval_msg
def start(self):
if not self.is_started:
#rospy.logwarn(self.param['mode'])
if self.param['mode'] in ('fixed_position', 'fixed_rate'):
if self.param['position'] == 'inherit':
inherit_index = self.param['inherit_from']
self.position = self.index_to_led_position[inherit_index]
else:
self.position = self.param['position']
self.device.set_led(int(self.position),self.param['rgb_value'])
self.index_to_led_position[self.trial_index] = self.position
elif self.param['mode'] == 'set_by_angle':
self.position = self.get_position_from_table(self.init_angle)
self.index_to_led_position[self.trial_index] = self.position
if self.position is not None:
self.device.set_led(int(self.position),self.param['rgb_value'])
#fponce edit 23/01/2020
elif self.param['mode'] == 'all_ON':
if self.position is not None:
self.device.set_all(self.param['rgb_value'])
self.index_to_led_position[self.trial_index] = -1
#fponce edit
elif self.param['mode'] == 'inherit_n_set_by_table':
inherit_index = self.param['inherit_from']
self.prev_position = self.index_to_led_position[inherit_index]
self.position = self.get_position_from_ledtable(self.prev_position)
self.index_to_led_position[self.trial_index] = self.position
if self.position is not None:
self.device.set_led(int(self.position),self.param['rgb_value'])
elif self.param['mode'] == 'inherit_from_last':
inherit_index = self.trial_index-1
self.position = self.index_to_led_position[inherit_index]
self.index_to_led_position[self.trial_index] = self.position
elif self.param['mode'] == 'random_from_list':
self.position = self.get_random_from_list(self.param['sunled_position_list'])
self.index_to_led_position[self.trial_index] = self.position
if self.position is not None:
self.device.set_led(int(self.position),self.param['rgb_value'])
##########
else:
raise ValueError, 'unknown mode'
#rospy.logwarn(self.position)
self.is_started = True
# def stop(self):
# if not self.is_stopped:
# self.device.set_led(-1,(0,0,0))
# self.is_stopped = True
def get_position_from_table(self,angle):
angle_pair_list = [angle_pair for angle_pair,led_index in self.param['sunled_table']]
led_index_list = [led_index for angle,led_index in self.param['sunled_table']]
position = None
for angle_pair, led_index in zip(angle_pair_list, led_index_list):
lower_angle, upper_angle = angle_pair
if lower_angle <= self.init_angle and self.init_angle <= upper_angle:
position = led_index
break
return position
######################
#fponce edit
def get_position_from_ledtable(self,prev_position):
prev_led_list = [prev_led for prev_led,led_index in self.param['led_to_led_table']]
led_index_list = [led_index for old_led,led_index in self.param['led_to_led_table']]
position = None
for prev_led, led_index in zip(prev_led_list, led_index_list):
if prev_led == prev_position:
position = led_index
return position
def get_random_from_list(self, position_list):
position = random.choice(position_list)
return position
| 43.37931 | 93 | 0.595787 | 4,931 | 0.979928 | 0 | 0 | 0 | 0 | 0 | 0 | 745 | 0.148052 |
982eca037ad091ca6153033d361aee2cf39feb98 | 371 | py | Python | Base64_Cleanup.py | Har6ard/HackTheBox | a1dcfa6373afa13469250572d5e93956c24e01dc | [
"MIT"
] | null | null | null | Base64_Cleanup.py | Har6ard/HackTheBox | a1dcfa6373afa13469250572d5e93956c24e01dc | [
"MIT"
] | null | null | null | Base64_Cleanup.py | Har6ard/HackTheBox | a1dcfa6373afa13469250572d5e93956c24e01dc | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
This script is just to clean up Base64 if is has � present in the output.
Example:
$�G�r�o�U�P�P�O�L�i�C�Y�S�E�t�t�I�N�G�s� �=� �[�r�E�F�]�.�A�S�s�e�M�B�L�Y�.�G�E�t�T�y�p�E�
$GroUPPOLiCYSEttINGs = [rEF].ASseMBLY.GEtTypE
"""
with open("./target.txt", "r") as f_obj:
data = f_obj.read()
cleaned = data.replace("�", "")
print(cleaned)
| 24.733333 | 90 | 0.587601 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.782796 |
982f189964a5ea4bd6d468801ff14602fedb3652 | 2,347 | py | Python | Pio/Pio_prefs.py | arthole/Pio | 1c82ce63da55962cddbb20e9ac4a85d7b8855925 | [
"BSD-2-Clause"
] | null | null | null | Pio/Pio_prefs.py | arthole/Pio | 1c82ce63da55962cddbb20e9ac4a85d7b8855925 | [
"BSD-2-Clause"
] | null | null | null | Pio/Pio_prefs.py | arthole/Pio | 1c82ce63da55962cddbb20e9ac4a85d7b8855925 | [
"BSD-2-Clause"
] | null | null | null |
#Pio_prefs
prefsdict = {
###modify or add preferences below
#below are the database preferences.
"sqluser" : "user",
"sqldb" : "database",
"sqlhost" : "127.0.0.1",
#authorization must come from same address - extra security
#valid values yes/no
"staticip" : "no",
#below is to do logging of qrystmts.
"piolog" : "yes",
#below are the show pobj and the show pinp preferences.
#pobj's get converted to <xpobj and pinp get converted to <xpinp and ppref get converted to <xppref
#any value other than yes is a no.
#ppref's of showpobj and showppref can be included in html. (showpinp is ignored because pprefs are actually processed after pinp, pifs.
#for ppref tag to override the value below, it must be the first ppref...otherwise it will only the show value for it and any following ppref
#example: <ppref name="showpobj" value="no">
"showpobj" : 'no',
"showpinp" : 'no',
"showppref" : 'no',
#below are the error preferences
#values for pobjerror are "bottom", "top", "" or "ignore", or a page, as in "error.html"
#bottom places errors at the end of the page, top at the begining, "" or ignore shows no error
#and a page (indicated with a ".") will open that return that page.
#example: <ppref name="pobjerror" value="ignore">
"pobjerror" : "bottom",
"pobjautherrorfile" : "Pio_login.html",
"pobjautherror" : "top",
"piosiderrorfile" : "none",
"piosiderror" : "bottom",
"loginerror" : "Pio_login.html",
#below is the administrator email
"pioadminemail" : "user@yourdomain.com",
#below is the upload folder
"uploadpath" : "/Library/WebServer/Documents/upload/",
#below is the 404 file not found preference
"pio404" : 'Pio_404.html',
#below is the error code for when an include file is not found
#if the value is "comment" the program will show the error as a comment in html, otherwise it will show it as normal text
"include404" : "nocomment",
#below are special user preferences you can include in your own programs
"dictionarykey" : "value"
###modify or add preferences above
##
##prefs can be overridden on a page by including a tag such as <ppref pref="pobjautherror" value="ignore'>
}
def gimme(dictkey):
if prefsdict.has_key(dictkey):
returnvalue = prefsdict[dictkey]
else:
returnvalue = ""
return returnvalue
def gimmedict():
return prefsdict
| 29.3375 | 142 | 0.707712 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,962 | 0.835961 |
982faf238d52b2846a9e61e74530e474bb1097c9 | 742 | py | Python | setup.py | jonbulica99/deeplator | d6ad8d60b09022e442cbde6c155a2d752072e358 | [
"MIT"
] | 64 | 2017-08-31T19:26:12.000Z | 2022-02-27T19:25:21.000Z | setup.py | jonbulica99/deeplator | d6ad8d60b09022e442cbde6c155a2d752072e358 | [
"MIT"
] | 12 | 2017-09-08T15:12:02.000Z | 2021-12-12T07:18:41.000Z | setup.py | jonbulica99/deeplator | d6ad8d60b09022e442cbde6c155a2d752072e358 | [
"MIT"
] | 14 | 2017-09-07T11:39:44.000Z | 2021-12-11T09:25:29.000Z | #!/usr/bin/env python3
from setuptools import setup
setup(
name="deeplator",
version="0.0.7",
description="Wrapper for DeepL translator.",
long_description="Deeplator is a library enabling translation via the DeepL translator.",
author="uinput",
author_email="uinput@users.noreply.github.com",
license="MIT",
url="https://github.com/uinput/deeplator",
keywords=["deepl", "translation", "translate", "language"],
python_requires=">=3",
packages=["deeplator"],
py_modules=["deeplator"],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3"
]
)
| 29.68 | 93 | 0.645553 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 435 | 0.586253 |
9830a97faa3e224e293c5d98b0df9eabae7d19b8 | 3,737 | py | Python | taiga/core/taxonomy.py | flayner2/TaIGa_pkg | 3d58907f5adec7035841e187e25dd786460fd3a2 | [
"MIT"
] | null | null | null | taiga/core/taxonomy.py | flayner2/TaIGa_pkg | 3d58907f5adec7035841e187e25dd786460fd3a2 | [
"MIT"
] | null | null | null | taiga/core/taxonomy.py | flayner2/TaIGa_pkg | 3d58907f5adec7035841e187e25dd786460fd3a2 | [
"MIT"
] | null | null | null | import sys
import logging as log
from ..common import parsers, helpers, retrievers, data_handlers
from ..common.data_models import Taxon
from typing import List
def run_taiga(infile: str,
email: str,
gb_mode: int = 0,
tid: bool = False,
correction: bool = False,
retries: int = 5,
silent: bool = False) -> List[Taxon]:
""" Wrapper for all of TaIGa's main functionalities
Required Parameters:
infile (str): Full path to input file
email (str): Valid user e-mail
Optional Parameters:
gb_mode (int): An integer representing the reading mode for the input file. Options are:
0: a plain text file, which is not handled by this function (Default)
1: Genbank file with multiple records from different organisms
2: Genbank file with a single record from a single organism
3: Genbank file with multiple records from the same organism
tid (bool: Default = False): Tells if TaIGa should expect a input file with Taxon IDs
correction (bool: Default = False): Activates the name correction function.
retries (int: Default = 5): How many time TaIGa should retry after a bad response.
silent (bool: Default = False): Tells if TaIGa should stop printing and create a log file
Returns:
None
"""
# Ouput and input paths
input_path = infile
# Providing the email when doing requests through E-Utils is recommended
user_email = email
# Minor config variables for some of TaIGa's functionalities
retries = retries
create_log = silent
name_correction = correction
# The switches for TaIGa's execution modes, either for Taxon IDs or Genbank files
taxid = tid
mode = gb_mode
# A list to hold Taxon objects
taxon_list: List[Taxon] = []
# Inital configuration for the logging module
# At this point, the output may be set to verbose or not
helpers.config_log(create_log)
log.info("""
*********************************************
* *
* TaIGa - Taxonomy Information Gatherer *
* *
*********************************************""")
# Checking if TaIGa is being run on Taxon ID mode with the '-c' argument
# This is needed because, when run with '--tid', TaIGa never actually tries to correct spelling
# as the retrieved name is assumed to be correct
if taxid and name_correction:
log.error("\nERROR: Please, when running TaIGa with the '--tid' option, don't use the '-c' "
"option as TaIGa already skips the name correction\n")
sys.exit()
# Check if input mode is for a Genbank format file or a text file and then parse the input
if not (mode == 0):
taxon_list = parsers.parse_gb(input_path, mode)
else:
taxon_list = parsers.parse_txt(input_path, taxid)
log.info("\n> Searching for taxonomic information...")
# Checking the type of input (Taxon ID or names) and fetching the rest of the information
if tid:
retrievers.retrieve_from_taxid(taxon_list, user_email, retries)
else:
retrievers.retrieve_from_names(
taxon_list, user_email, name_correction, retries)
# Calling the wrapper function to fetch for the taxonomic information for all organisms
retrievers.retrieve_taxonomy(taxon_list, user_email, retries)
log.info(
"\n> Successfuly created taxa metadata Dataframe. You can manipulate or save it!\n")
# Calling a function to handle the fetched data and convert it to a Pandas DataFrame
return taxon_list
| 38.525773 | 100 | 0.638748 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,436 | 0.65186 |
9830fcb857d526b4820441836a1d74c1992e4612 | 171 | py | Python | ot/views.py | marclanepitt/ot | 3cf4c24cd412735b93e56175ffa31c3eecba8ee5 | [
"MIT"
] | null | null | null | ot/views.py | marclanepitt/ot | 3cf4c24cd412735b93e56175ffa31c3eecba8ee5 | [
"MIT"
] | null | null | null | ot/views.py | marclanepitt/ot | 3cf4c24cd412735b93e56175ffa31c3eecba8ee5 | [
"MIT"
] | null | null | null | from django.shortcuts import render
def HomeView(request):
return render(request , 'site_home.html')
def AboutView(request):
return render(request, 'about.html') | 24.428571 | 45 | 0.748538 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 28 | 0.163743 |
98325c84cea8f152ab2a466da34dc57234932f15 | 1,412 | py | Python | Algo and DSA/LeetCode-Solutions-master/Python/maximum-difference-between-node-and-ancestor.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 3,269 | 2018-10-12T01:29:40.000Z | 2022-03-31T17:58:41.000Z | Algo and DSA/LeetCode-Solutions-master/Python/maximum-difference-between-node-and-ancestor.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 53 | 2018-12-16T22:54:20.000Z | 2022-02-25T08:31:20.000Z | Algo and DSA/LeetCode-Solutions-master/Python/maximum-difference-between-node-and-ancestor.py | Sourav692/FAANG-Interview-Preparation | f523e5c94d582328b3edc449ea16ac6ab28cdc81 | [
"Unlicense"
] | 1,236 | 2018-10-12T02:51:40.000Z | 2022-03-30T13:30:37.000Z | # Time: O(n)
# Space: O(h)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# iterative stack solution
class Solution(object):
def maxAncestorDiff(self, root):
"""
:type root: TreeNode
:rtype: int
"""
result = 0
stack = [(root, 0, float("inf"))]
while stack:
node, mx, mn = stack.pop()
if not node:
continue
result = max(result, mx-node.val, node.val-mn)
mx = max(mx, node.val)
mn = min(mn, node.val)
stack.append((node.left, mx, mn))
stack.append((node.right, mx, mn))
return result
# Time: O(n)
# Space: O(h)
# recursive solution
class Solution2(object):
def maxAncestorDiff(self, root):
"""
:type root: TreeNode
:rtype: int
"""
def maxAncestorDiffHelper(node, mx, mn):
if not node:
return 0
result = max(mx-node.val, node.val-mn)
mx = max(mx, node.val)
mn = min(mn, node.val)
result = max(result, maxAncestorDiffHelper(node.left, mx, mn))
result = max(result, maxAncestorDiffHelper(node.right, mx, mn))
return result
return maxAncestorDiffHelper(root, 0, float("inf"))
| 26.641509 | 75 | 0.524079 | 1,263 | 0.894476 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.192635 |
98329bd5987a8121de4a33c66083b79a38d22afd | 478 | py | Python | blogs/migrations/0012_auto_20200601_1247.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 657 | 2020-05-26T16:16:07.000Z | 2022-03-26T22:35:01.000Z | blogs/migrations/0012_auto_20200601_1247.py | daaawx/bearblog | 5e01e4443c632ff53b918cf8a0d3b1c648b352fe | [
"MIT"
] | 107 | 2020-05-26T17:45:04.000Z | 2022-03-17T08:24:00.000Z | blogs/migrations/0012_auto_20200601_1247.py | chachan/bearblog | f399f806839bea9f6fd499f50c87cf84fda3bc91 | [
"MIT"
] | 42 | 2020-05-26T23:57:58.000Z | 2022-03-15T04:20:26.000Z | # Generated by Django 3.0.6 on 2020-06-01 12:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogs', '0011_auto_20200531_0915'),
]
operations = [
migrations.RemoveField(
model_name='post',
name='tags',
),
migrations.AddField(
model_name='blog',
name='hashtags',
field=models.TextField(blank=True),
),
]
| 20.782609 | 47 | 0.558577 | 385 | 0.805439 | 0 | 0 | 0 | 0 | 0 | 0 | 107 | 0.223849 |
9832e3a03c7ed0e8cda3d657951d2ea2db2a6ab7 | 1,254 | py | Python | nlpr/utils/nL_nr_assay_check.py | jgrembi/nL-qPCR_PathogenChip | 89a319a6039d8bd8ebc3164046dd8789d422551e | [
"CC0-1.0"
] | null | null | null | nlpr/utils/nL_nr_assay_check.py | jgrembi/nL-qPCR_PathogenChip | 89a319a6039d8bd8ebc3164046dd8789d422551e | [
"CC0-1.0"
] | null | null | null | nlpr/utils/nL_nr_assay_check.py | jgrembi/nL-qPCR_PathogenChip | 89a319a6039d8bd8ebc3164046dd8789d422551e | [
"CC0-1.0"
] | null | null | null | # THE PURPOSE OF THIS SCRIPT IS TO SELECT ASSAYS FROM A LIST OF PRIMER COMBINATIONS, SUCH THAT NO MORE THAN TWO OF THE ASSAYS TARGET EXACTLY APROXIMATELY THE SAME POSITIONS .
import sys
fn = sys.argv[1]
fh = open(fn, 'r')
def plus_or_minus(x,h):
L = []
for i in range(h):
L.append(int(x-i))
L.append(int(x+i))
return list(set(L))
def lists_overlap3(a, b):
return bool(set(a) & set(b))
forbidden_range_F = []
forbidden_range_R = []
forbidden_range_F2 = []
forbidden_range_R2= []
forbidden_range_F3 = []
forbidden_range_R3 = []
# Take the best hit
line = fh.readline()
line = line.strip()
print line
for line in fh:
forbidden_range_F3 = list(forbidden_range_F2)
forbidden_range_R3 = list(forbidden_range_R2)
forbidden_range_F2 = list(forbidden_range_F)
forbidden_range_R2 = list(forbidden_range_R)
#print "#####"
#print forbidden_range_F2
#print forbidden_range_F3
#print "#####"
line = line.strip()
start = int(line.split()[6])
end = int(line.split()[7])
forbidden_range_F.append(start)
forbidden_range_R.append(end)
test_F = plus_or_minus(int(start),4)
test_R = plus_or_minus(int(end),4)
if lists_overlap3(test_F, forbidden_range_F2) and lists_overlap3(test_R,forbidden_range_R2):
pass
else:
print line
fh.close()
| 25.08 | 174 | 0.729665 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 274 | 0.218501 |
983304ddbd4a49b1430de787ab4ec1a6757b9a02 | 4,741 | py | Python | script.py | Kevogich/Recurrence-relation | 675996861ca27b6dd25119172e3c3fa317bd9bce | [
"Apache-2.0"
] | 1 | 2016-07-01T22:14:40.000Z | 2016-07-01T22:14:40.000Z | script.py | Kevogich/Recurrence-relation | 675996861ca27b6dd25119172e3c3fa317bd9bce | [
"Apache-2.0"
] | null | null | null | script.py | Kevogich/Recurrence-relation | 675996861ca27b6dd25119172e3c3fa317bd9bce | [
"Apache-2.0"
] | 2 | 2016-10-11T22:37:51.000Z | 2021-05-17T02:47:29.000Z | import pandas as pd
import pandas
import numpy as np
#provide local path
testfile='../input/test.csv'
data = open(testfile).readlines()
sequences={} #(key, value) = (id , sequence)
for i in range(1,len(data)):
line=data[i]
line =line.replace('"','')
line = line[:-1].split(',')
id = int(line[0])
sequence=[int(x) for x in line[1:]];
sequences[id]=sequence
# In[ ]:
def checkRecurrence(seq, order= 2, minlength = 7):
"""
:type seq: List[int]
:type order: int
:type minlength: int
:rtype: List[int]
Check whether the input sequence is a recurrence sequence with given order.
If it is, return the coefficients for the recurrenec relation.
If not, return None.
"""
if len(seq)< max((2*order+1), minlength):
return None
################ Set up the system of equations
A,b = [], []
for i in range(order):
A.append(seq[i:i+order])
b.append(seq[i+order])
A,b =np.array(A), np.array(b)
try:
if np.linalg.det(A)==0:
return None
except TypeError:
return None
############# Solve for the coefficients (c0, c1, c2, ...)
coeffs = np.linalg.inv(A).dot(b)
############ Check if the next terms satisfy recurrence relation
for i in range(2*order, len(seq)):
predict = np.sum(coeffs*np.array(seq[i-order:i]))
if abs(predict-seq[i])>10**(-2):
return None
return list(coeffs)
def predictNextTerm(seq, coeffs):
"""
:type seq: List[int]
:type coeffs: List[int]
:rtype: int
Given a sequence and coefficienes, compute the next term for the sequence.
"""
order = len(coeffs)
predict = np.sum(coeffs*np.array(seq[-order:]))
return int(round(predict))
# ## Example: ##
# * Given a sequence [1,5,11,21,39,73,139,269,527].
# * We verify if it's 3rd order recurrence sequence and find the coefficients (2,-5,4).
# * We then predict the next term using the last 3 terms and the relation $a_{n+3} = 2a_{n}-5a_{n+1}+4a_{n+2}$.
# In[ ]:
seq = [1,5,11,21,39,73,139,269,527]
print (checkRecurrence(seq,3))
print (predictNextTerm(seq, [2,-5,4]))
# # Find 2nd order sequeneces in the test set #
# In[ ]:
order2Seq={} #(key, value) = (sequence id, [prediction, coefficients])
for id in sequences:
seq = sequences[id]
coeff = checkRecurrence(seq,2)
if coeff!=None:
predict = predictNextTerm(seq, coeff)
order2Seq[id]=(predict,coeff)
print ("We found %d sequences\n" %len(order2Seq))
print ("Some examples\n")
print ("ID, Prediction, Coefficients")
for key in sorted(order2Seq)[0:5]:
value = order2Seq[key]
print ("%s, %s, %s" %(key, value[0], [int(round(x)) for x in value[1]]))
# # Find 3rd order sequeneces in the test set #
# In[ ]:
order3Seq={}
for id in sequences:
if id in order2Seq:
continue
seq = sequences[id]
coeff = checkRecurrence(seq,3)
if coeff!=None:
predict = predictNextTerm(seq, coeff)
order3Seq[id]=(predict,coeff)
print ("We found %d sequences\n" %len(order3Seq))
print ("Some examples\n")
print ("ID, Prediction, Coefficients")
for key in sorted(order3Seq)[0:5]:
value = order3Seq[key]
print ("%s, %s, %s" %(key, value[0], [int(round(x)) for x in value[1]]))
# # Find 4th order sequeneces in the test set #
# In[ ]:
order4Seq={}
for id in sequences:
if id in order2Seq or id in order3Seq:
continue
seq = sequences[id]
coeff = checkRecurrence(seq,4)
if coeff!=None:
predict = predictNextTerm(seq, coeff)
order4Seq[id]=(predict,coeff)
print ("We found %d sequences \n" %len(order4Seq))
print ("Some examples\n")
print ("ID, Prediction, Coefficients")
for key in sorted(order4Seq)[4:5]:
value = order4Seq[key]
print ("%s, %s, %s" %(key, value[0], [int(round(x)) for x in value[1]]))
print (sequences[239][0:17])
# ## Recurrence relations not included in OEIS ##
# In the previous cells,
# * We find that Sequence 239 is a 4th order sequence and predict the next term as 5662052980.
# * We check OEIS https://oeis.org/A000773, which confirms the prediction is correct.
# * We observe that this recurrence relation is not described in OEIS. (There are more such sequences.)
# In[ ]:
print("Conclusion:")
print("Number of sequences in the test set:", len(sequences))
print("Number of 2nd order sequences:", len(order2Seq))
print("Number of 3rd order sequences:", len(order3Seq))
print("Number of 4th order sequences:", len(order4Seq))
| 28.560241 | 113 | 0.600717 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,020 | 0.42607 |
9833c8c51418c01dce19ef28d4ce65d9407a8bcf | 5,422 | py | Python | util/mach/mig.py | rovarma/crashpad | 42b57efa554a47745cb84860ead46bc9f32b77a9 | [
"Apache-2.0"
] | 2,151 | 2020-04-18T07:31:17.000Z | 2022-03-31T08:39:18.000Z | util/mach/mig.py | rovarma/crashpad | 42b57efa554a47745cb84860ead46bc9f32b77a9 | [
"Apache-2.0"
] | 395 | 2020-04-18T08:22:18.000Z | 2021-12-08T13:04:49.000Z | util/mach/mig.py | rovarma/crashpad | 42b57efa554a47745cb84860ead46bc9f32b77a9 | [
"Apache-2.0"
] | 338 | 2020-04-18T08:03:10.000Z | 2022-03-29T12:33:22.000Z | #!/usr/bin/env python
# coding: utf-8
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import re
import subprocess
import sys
def FixUserImplementation(implementation):
"""Rewrites a MIG-generated user implementation (.c) file.
Rewrites the file at |implementation| by adding “__attribute__((unused))” to
the definition of any structure typedefed as “__Reply” by searching for the
pattern unique to those structure definitions. These structures are in fact
unused in the user implementation file, and this will trigger a
-Wunused-local-typedefs warning in gcc unless removed or marked with the
“unused” attribute.
"""
file = open(implementation, 'r+')
contents = file.read()
pattern = re.compile('^(\t} __Reply);$', re.MULTILINE)
contents = pattern.sub(r'\1 __attribute__((unused));', contents)
file.seek(0)
file.truncate()
file.write(contents)
file.close()
def FixServerImplementation(implementation):
"""Rewrites a MIG-generated server implementation (.c) file.
Rewrites the file at |implementation| by replacing “mig_internal” with
“mig_external” on functions that begin with “__MIG_check__”. This makes these
functions available to other callers outside this file from a linkage
perspective. It then returns, as a list of lines, declarations that can be
added to a header file, so that other files that include that header file will
have access to these declarations from a compilation perspective.
"""
file = open(implementation, 'r+')
contents = file.read()
# Find interesting declarations.
declaration_pattern = \
re.compile('^mig_internal (kern_return_t __MIG_check__.*)$',
re.MULTILINE)
declarations = declaration_pattern.findall(contents)
# Remove “__attribute__((__unused__))” from the declarations, and call them
# “mig_external” or “extern” depending on whether “mig_external” is defined.
attribute_pattern = re.compile(r'__attribute__\(\(__unused__\)\) ')
declarations = ['#ifdef mig_external\nmig_external\n#else\nextern\n#endif\n' +
attribute_pattern.sub('', x) +
';\n' for x in declarations]
# Rewrite the declarations in this file as “mig_external”.
contents = declaration_pattern.sub(r'mig_external \1', contents);
# Crashpad never implements the mach_msg_server() MIG callouts. To avoid
# needing to provide stub implementations, set KERN_FAILURE as the RetCode
# and abort().
routine_callout_pattern = re.compile(
r'OutP->RetCode = (([a-zA-Z0-9_]+)\(.+\));')
routine_callouts = routine_callout_pattern.findall(contents)
for routine in routine_callouts:
contents = contents.replace(routine[0], 'KERN_FAILURE; abort()')
# Include the header for abort().
contents = '#include <stdlib.h>\n' + contents
file.seek(0)
file.truncate()
file.write(contents)
file.close()
return declarations
def FixHeader(header, declarations=[]):
"""Rewrites a MIG-generated header (.h) file.
Rewrites the file at |header| by placing it inside an “extern "C"” block, so
that it declares things properly when included by a C++ compilation unit.
|declarations| can be a list of additional declarations to place inside the
“extern "C"” block after the original contents of |header|.
"""
file = open(header, 'r+')
contents = file.read()
declarations_text = ''.join(declarations)
contents = '''\
#ifdef __cplusplus
extern "C" {
#endif
%s
%s
#ifdef __cplusplus
}
#endif
''' % (contents, declarations_text)
file.seek(0)
file.truncate()
file.write(contents)
file.close()
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument('--developer-dir', help='Path to Xcode')
parser.add_argument('--sdk', help='Path to SDK')
parser.add_argument('--include',
default=[],
action='append',
help='Additional include directory')
parser.add_argument('defs')
parser.add_argument('user_c')
parser.add_argument('server_c')
parser.add_argument('user_h')
parser.add_argument('server_h')
parsed = parser.parse_args(args)
command = ['mig',
'-user', parsed.user_c,
'-server', parsed.server_c,
'-header', parsed.user_h,
'-sheader', parsed.server_h,
]
if parsed.developer_dir is not None:
os.environ['DEVELOPER_DIR'] = parsed.developer_dir
if parsed.sdk is not None:
command.extend(['-isysroot', parsed.sdk])
for include in parsed.include:
command.extend(['-I' + include])
command.append(parsed.defs)
subprocess.check_call(command)
FixUserImplementation(parsed.user_c)
server_declarations = FixServerImplementation(parsed.server_c)
FixHeader(parsed.user_h)
FixHeader(parsed.server_h, server_declarations)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| 34.316456 | 80 | 0.709332 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,068 | 0.560365 |
9834ba514f6075eec2c945801708248dc53e6914 | 11,647 | py | Python | geoutils/geovector.py | AdrienWehrle/GeoUtils | 67014d8a89f7d356d60255fadd347b12fe1fe744 | [
"BSD-3-Clause"
] | null | null | null | geoutils/geovector.py | AdrienWehrle/GeoUtils | 67014d8a89f7d356d60255fadd347b12fe1fe744 | [
"BSD-3-Clause"
] | null | null | null | geoutils/geovector.py | AdrienWehrle/GeoUtils | 67014d8a89f7d356d60255fadd347b12fe1fe744 | [
"BSD-3-Clause"
] | null | null | null | """
geoutils.vectortools provides a toolset for working with vector data.
"""
from __future__ import annotations
import warnings
from collections import abc
from numbers import Number
from typing import TypeVar
import geopandas as gpd
import numpy as np
import rasterio as rio
from rasterio import features, warp
from rasterio.crs import CRS
import geoutils as gu
# This is a generic Vector-type (if subclasses are made, this will change appropriately)
VectorType = TypeVar("VectorType", bound="Vector")
class Vector:
    """
    Create a Vector object from a fiona-supported vector dataset.
    """

    def __init__(self, filename: str | gpd.GeoDataFrame):
        """
        Load a fiona-supported dataset, given a filename.

        :param filename: The filename or GeoDataFrame of the dataset.

        :return: A Vector object
        """
        if isinstance(filename, str):
            ds = gpd.read_file(filename)
            self.ds = ds
            self.name: str | gpd.GeoDataFrame | None = filename
        elif isinstance(filename, gpd.GeoDataFrame):
            self.ds = filename
            self.name = None
        else:
            raise ValueError("filename argument not recognised.")
        self.crs = self.ds.crs

    def __repr__(self) -> str:
        return str(self.ds.__repr__())

    def __str__(self) -> str:
        """Provide string of information about Raster."""
        return self.info()

    def info(self) -> str:
        """
        Returns string of information about the vector (filename, coordinate system, number of layers, features, etc.).

        :returns: text information about Vector attributes.
        :rtype: str
        """
        as_str = [  # 'Driver: {} \n'.format(self.driver),
            f"Filename: {self.name} \n",
            f"Coordinate System: EPSG:{self.ds.crs.to_epsg()}\n",
            f"Number of features: {len(self.ds)} \n",
            f"Extent: {self.ds.total_bounds.tolist()} \n",
            f"Attributes: {self.ds.columns.tolist()} \n",
            self.ds.__repr__(),
        ]
        return "".join(as_str)

    @property
    def bounds(self) -> rio.coords.BoundingBox:
        """Get a bounding box of the total bounds of the Vector."""
        return rio.coords.BoundingBox(*self.ds.total_bounds)

    def copy(self: VectorType) -> VectorType:
        """Return a copy of the Vector."""
        # Utilise the copy method of GeoPandas
        new_vector = self.__new__(type(self))
        new_vector.__init__(self.ds.copy())
        return new_vector  # type: ignore

    def crop2raster(self, rst: gu.Raster) -> None:
        """
        Update self so that features outside the extent of a raster file are cropped.

        Reprojection is done on the fly if both data set have different projections.

        :param rst: A Raster object or string to filename
        """
        # If input is string, open as Raster
        if isinstance(rst, str):
            rst = gu.Raster(rst)
        # Convert raster extent into self CRS
        # Note: could skip this if we could test if rojections are same
        # Note: should include a method in Raster to get extent in other projections, not only using corners
        left, bottom, right, top = rst.bounds
        x1, y1, x2, y2 = warp.transform_bounds(rst.crs, self.ds.crs, left, bottom, right, top)
        self.ds = self.ds.cx[x1:x2, y1:y2]

    def _out_grid(
        self,
        crs: CRS | None,
        xres: float | None,
        yres: float | None,
        bounds: tuple[float, float, float, float] | None,
    ) -> tuple[tuple[int, int], rio.Affine, CRS]:
        """
        Compute (out_shape, transform, crs) for an output grid defined by resolution/bounds.

        Shared by create_mask and rasterize when no reference raster is given.
        At minimum xres must be set; yres defaults to xres, and crs/bounds
        default to self's.
        """
        # At minimum, xres must be set
        if xres is None:
            raise ValueError("at least rst or xres must be set")
        if yres is None:
            yres = xres
        # By default, use self's CRS and bounds
        if crs is None:
            crs = self.ds.crs
        if bounds is None:
            bounds = self.ds.total_bounds
        # Calculate raster shape
        left, bottom, right, top = bounds
        # width is the pixel count along x (columns), height along y (rows).
        # BUGFIX: these two were previously swapped (width from the y extent,
        # height from the x extent), which transposed out_shape and produced a
        # wrong transform whenever the extent/resolution was not square.
        width = abs((right - left) / xres)
        height = abs((top - bottom) / yres)
        if width % 1 != 0 or height % 1 != 0:
            warnings.warn("Bounds not a multiple of xres/yres, use rounded bounds")
        width = int(np.round(width))
        height = int(np.round(height))
        # rasterio convention: shape is (rows, cols)
        out_shape = (height, width)
        # Calculate raster transform
        transform = rio.transform.from_bounds(left, bottom, right, top, width, height)
        return out_shape, transform, crs

    def create_mask(
        self,
        rst: str | gu.georaster.RasterType | None = None,
        crs: CRS | None = None,
        xres: float | None = None,
        yres: float | None = None,
        bounds: tuple[float, float, float, float] | None = None,
    ) -> np.ndarray:
        """
        Rasterize the vector features into a boolean raster which has the extent/dimensions of \
the provided raster file.

        Alternatively, user can specify a grid to rasterize on using xres, yres, bounds and crs.
        Only xres is mandatory, by default yres=xres and bounds/crs are set to self's.

        Vector features which fall outside the bounds of the raster file are not written to the new mask file.

        :param rst: A Raster object or string to filename
        :param crs: A pyproj or rasterio CRS object (Default to rst.crs if not None then self.crs)
        :param xres: Output raster spatial resolution in x. Only is rst is None.
        :param yres: Output raster spatial resolution in y. Only if rst is None. (Default to xres)
        :param bounds: Output raster bounds (left, bottom, right, top). Only if rst is None (Default to self bounds)

        :returns: array containing the mask
        """
        # If input rst is string, open as Raster
        if isinstance(rst, str):
            rst = gu.Raster(rst)  # type: ignore

        # If no rst given, use provided dimensions, otherwise use rst's directly
        if rst is None:
            out_shape, transform, crs = self._out_grid(crs, xres, yres, bounds)
        else:
            out_shape = rst.shape  # type: ignore
            transform = rst.transform  # type: ignore
            crs = rst.crs  # type: ignore

        # Reproject vector into rst CRS
        # Note: would need to check if CRS are different
        vect = self.ds.to_crs(crs)

        # Rasterize geometry: burn 1 inside features, 0 outside, then cast to bool
        mask = features.rasterize(
            shapes=vect.geometry, fill=0, out_shape=out_shape, transform=transform, default_value=1, dtype="uint8"
        ).astype("bool")

        # Force output mask to be of same dimension as input rst
        if rst is not None:
            mask = mask.reshape((rst.count, rst.height, rst.width))  # type: ignore

        return mask

    def rasterize(
        self,
        rst: str | gu.georaster.RasterType | None = None,
        crs: CRS | None = None,
        xres: float | None = None,
        yres: float | None = None,
        bounds: tuple[float, float, float, float] | None = None,
        in_value: int | float | abc.Iterable[int | float] | None = None,
        out_value: int | float = 0,
    ) -> np.ndarray:
        """
        Return an array with input geometries burned in.

        By default, output raster has the extent/dimensions of the provided raster file.
        Alternatively, user can specify a grid to rasterize on using xres, yres, bounds and crs.
        Only xres is mandatory, by default yres=xres and bounds/crs are set to self's.

        Burn value is set by user and can be either a single number, or an iterable of same length as self.ds.
        Default is an index from 1 to len(self.ds).

        :param rst: A raster to be used as reference for the output grid
        :param crs: A pyproj or rasterio CRS object (Default to rst.crs if not None then self.crs)
        :param xres: Output raster spatial resolution in x. Only is rst is None.
        :param yres: Output raster spatial resolution in y. Only if rst is None. (Default to xres)
        :param bounds: Output raster bounds (left, bottom, right, top). Only if rst is None (Default to self bounds)
        :param in_value: Value(s) to be burned inside the polygons (Default is self.ds.index + 1)
        :param out_value: Value to be burned outside the polygons (Default is 0)

        :returns: array containing the burned geometries
        """
        # If input rst is string, open as Raster
        if isinstance(rst, str):
            rst = gu.Raster(rst)  # type: ignore

        # If no rst given, use provided dimensions, otherwise use rst's directly
        if rst is None:
            out_shape, transform, crs = self._out_grid(crs, xres, yres, bounds)
        else:
            out_shape = rst.shape  # type: ignore
            transform = rst.transform  # type: ignore
            crs = rst.crs  # type: ignore

        # Reproject vector into rst CRS
        # Note: would need to check if CRS are different
        vect = self.ds.to_crs(crs)

        # Set default burn value, index from 1 to len(self.ds)
        if in_value is None:
            in_value = self.ds.index + 1

        # Rasterize geometry
        if isinstance(in_value, abc.Iterable):
            if len(in_value) != len(vect.geometry):  # type: ignore
                raise ValueError(
                    "in_value must have same length as self.ds.geometry, currently {} != {}".format(
                        len(in_value), len(vect.geometry)  # type: ignore
                    )
                )
            # one burn value per geometry
            out_geom = ((geom, value) for geom, value in zip(vect.geometry, in_value))
            mask = features.rasterize(shapes=out_geom, fill=out_value, out_shape=out_shape, transform=transform)
        elif isinstance(in_value, Number):
            # same burn value for every geometry
            mask = features.rasterize(
                shapes=vect.geometry, fill=out_value, out_shape=out_shape, transform=transform, default_value=in_value
            )
        else:
            raise ValueError("in_value must be a single number or an iterable with same length as self.ds.geometry")

        return mask

    def query(self: VectorType, expression: str, inplace: bool = False) -> VectorType:
        """
        Query the Vector dataset with a valid Pandas expression.

        :param expression: A python-like expression to evaluate. Example: "col1 > col2"
        :param inplace: Whether the query should modify the data in place or return a modified copy.

        :returns: Vector resulting from the provided query expression or itself if inplace=True.
        """
        # Modify inplace if wanted and return the self instance.
        if inplace:
            self.ds.query(expression, inplace=True)
            return self

        # Otherwise, create a new Vector from the queried dataset.
        new_vector = self.__new__(type(self))
        new_vector.__init__(self.ds.query(expression))
        return new_vector  # type: ignore
| 38.438944 | 119 | 0.603245 | 11,136 | 0.956126 | 0 | 0 | 186 | 0.01597 | 0 | 0 | 5,607 | 0.481412 |
98357169592d3463991af69ec85a5b95cd26dfba | 753 | py | Python | app/setup.py | cleve/varidb | fc1b10aa4d708cee1c83909f10773948cee0c539 | [
"Apache-2.0"
] | null | null | null | app/setup.py | cleve/varidb | fc1b10aa4d708cee1c83909f10773948cee0c539 | [
"Apache-2.0"
] | 6 | 2020-11-05T02:18:15.000Z | 2022-03-12T00:50:09.000Z | app/setup.py | cleve/pulzar | fc1b10aa4d708cee1c83909f10773948cee0c539 | [
"Apache-2.0"
] | null | null | null | from setuptools import setup
# with open("../README.md", "r") as fh:
# long_description = fh.read()
# Package metadata for the pulzar distributed database / job system.
setup(name='pulzar-pkg',
      version='21.4.1',
      author='Mauricio Cleveland',
      author_email='mauricio.cleveland@gmail.com',
      description='Distributed database and jobs',
      # long_description=long_description,
      # long_description_content_type="text/markdown",
      # data_files=[('/var/lib/pulzar/data', [])],
      url='http://github.com/cleve/pulzar',
      packages=['pulzarcore', 'pulzarutils'],
      classifiers=[
          "Programming Language :: Python :: 3",
          # NOTE(review): classifier declares MIT while the repository
          # metadata suggests Apache-2.0 — confirm which license applies.
          "License :: OSI Approved :: MIT License",
          "Operating System :: POSIX :: Linux"
      ],
      python_requires='>=3.6',
      )
| 32.73913 | 58 | 0.605578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 485 | 0.64409 |
9836b76f5bd54af58f94a1f421103f0a705f31c9 | 3,362 | py | Python | data/sample_hq_aniso.py | aneeshnaik/HernquistFlows | 7f81f9b47297b115ae6b593593aac59afafc48b3 | [
"MIT"
] | null | null | null | data/sample_hq_aniso.py | aneeshnaik/HernquistFlows | 7f81f9b47297b115ae6b593593aac59afafc48b3 | [
"MIT"
] | null | null | null | data/sample_hq_aniso.py | aneeshnaik/HernquistFlows | 7f81f9b47297b115ae6b593593aac59afafc48b3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Sample 10^6 particles from anisotropic Hernquist DF.
Created: February 2021
Author: A. P. Naik
"""
import sys
from emcee import EnsembleSampler as Sampler
import numpy as np
sys.path.append("../src")
from constants import G, M_sun, kpc
from hernquist import calc_DF_aniso
def hernquist_df_aniso(theta, M, a):
    """
    Evaluate the anisotropic Hernquist distribution function.

    Computes the log-probability of the given phase space position theta,
    following Eq. (44) in Naik et al., (2020).

    Parameters
    ----------
    theta: array-like, shape (6,)
        Phase space position (x, y, z, vx, vy, vz). UNITS: metres and
        metres/second for positions/velocities respectively.
    M: float
        Total mass of Hernquist blob. UNITS: kilograms.
    a: float
        Scale radius of Hernquist blob. UNITS: metres.

    Returns
    -------
    lnf: float
        Unnormalised ln-probability associated with phase space position.
    """
    pos, vel = theta[:3], theta[3:]
    f = calc_DF_aniso(pos, vel, M, a)
    # emcee requires a finite log-probability: use a very large negative
    # value wherever the DF vanishes (disallowed region of phase space)
    if f == 0:
        return -1e+20
    return np.log(f)
def sample(N, M, a):
    """
    Sample N particles from the anisotropic Hernquist distribution function.

    The DF is parametrised by mass M and scale radius a.

    Sampler uses 100 MCMC walkers (emcee), each taking N iterations after a
    1000-step burn-in. The chain is thinned by an interval of 100, giving
    N quasi-independent samples in total (100 walkers * N iterations / 100).

    Parameters
    ----------
    N: int
        Number of particles to sample. Note: this needs to be a multiple of
        100 (the number of walkers).
    M: float
        Total mass of Hernquist blob. UNITS: kilograms.
    a: float
        Scale radius of Hernquist blob. UNITS: metres.

    Returns
    -------
    pos: (N, 3) array
        Positions of sampled particles, in Cartesian coordinates. UNITS:
        metres.
    vel: (N, 3) array
        Velocities of sampled particles, in Cartesian coordinates. UNITS:
        metres/second.
    """
    # set up sampler
    df_function = hernquist_df_aniso
    nwalkers, ndim = 100, 6
    n_burnin = 1000
    assert N % nwalkers == 0
    n_iter = N
    s = Sampler(nwalkers, ndim, df_function, args=[M, a])
    # set up initial walker positions: uniform in a box scaled by the
    # blob's size and characteristic velocity dispersion
    v_sig = 0.5 * np.sqrt(G * M / a) / np.sqrt(3)
    sig = np.array([0.3 * a, 0.3 * a, 0.3 * a, v_sig, v_sig, v_sig])
    p0 = -sig + 2 * sig * np.random.rand(nwalkers, ndim)
    # burn in
    print("\nBurning in...", flush=True)
    s.run_mcmc(p0, n_burnin, progress=True)
    # take final sample, restarting walkers from their last burn-in position
    p0 = s.chain[:, -1, :]
    s.reset()
    print("\n\nTaking final sample...", flush=True)
    s.run_mcmc(p0, n_iter, progress=True, thin=100)
    pos = s.flatchain[:, :3]
    vel = s.flatchain[:, 3:]
    return pos, vel
def downsample(pos, vel, a, x_truncation, size=None):
    """Draw particles (with replacement) from those inside a truncation radius.

    Parameters
    ----------
    pos, vel: (M, 3) arrays
        Particle positions and velocities.
    a: float
        Scale radius of the Hernquist blob.
    x_truncation: float
        Truncation radius in units of a; only particles with r < x_truncation * a
        are eligible.
    size: int, optional
        Number of particles to draw. Defaults to the module-level constant N
        (previous behaviour relied on this global implicitly, which only works
        when called from the __main__ block below).

    Returns
    -------
    pos, vel: (size, 3) arrays of the selected particles.
    """
    if size is None:
        size = N  # backward-compatible fallback to the global defined in __main__
    r = np.linalg.norm(pos, axis=-1)
    allowed = np.where(r < x_truncation * a)[0]
    inds = np.random.choice(allowed, size=size)
    pos = pos[inds]
    vel = vel[inds]
    return pos, vel
if __name__ == '__main__':
    # 10^10 solar-mass Hernquist blob with a 5 kpc scale radius
    M = 1e+10 * M_sun
    a = 5 * kpc
    N = 1000000
    # oversample by 2x, then truncate at 200 scale radii and downsample to N
    pos, vel = sample(2 * N, M, a)
    pos, vel = downsample(pos, vel, a, x_truncation=200)
    np.savez("hq_aniso_orig", pos=pos, vel=vel)
| 25.861538 | 79 | 0.621356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,867 | 0.555324 |
98371d3c3294fca73ee9c19cdbb1162098c454b0 | 1,205 | bzl | Python | csharp/private/sdk.bzl | j3parker/rules_csharp | f5fbbd545b1f18efad5e4ce3d06bfabe6b48eeb4 | [
"Apache-2.0"
] | null | null | null | csharp/private/sdk.bzl | j3parker/rules_csharp | f5fbbd545b1f18efad5e4ce3d06bfabe6b48eeb4 | [
"Apache-2.0"
] | null | null | null | csharp/private/sdk.bzl | j3parker/rules_csharp | f5fbbd545b1f18efad5e4ce3d06bfabe6b48eeb4 | [
"Apache-2.0"
] | null | null | null | """
Declarations for the .NET SDK Downloads URLs and version
These are the URLs to download the .NET SDKs for each of the supported operating systems. These URLs are accessible from: https://dotnet.microsoft.com/download/dotnet-core.
"""
# Version string shared by all platform downloads below.
DOTNET_SDK_VERSION = "3.1.100"
# Per-platform download URL and archive checksum (64 hex chars, presumably
# SHA-256 — confirm against the Bazel download rule that consumes this).
DOTNET_SDK = {
    "windows": {
        "url": "https://download.visualstudio.microsoft.com/download/pr/28a2c4ff-6154-473b-bd51-c62c76171551/ea47eab2219f323596c039b3b679c3d6/dotnet-sdk-3.1.100-win-x64.zip",
        "hash": "abcd034b230365d9454459e271e118a851969d82516b1529ee0bfea07f7aae52",
    },
    "linux": {
        "url": "https://download.visualstudio.microsoft.com/download/pr/d731f991-8e68-4c7c-8ea0-fad5605b077a/49497b5420eecbd905158d86d738af64/dotnet-sdk-3.1.100-linux-x64.tar.gz",
        "hash": "3687b2a150cd5fef6d60a4693b4166994f32499c507cd04f346b6dda38ecdc46",
    },
    "osx": {
        "url": "https://download.visualstudio.microsoft.com/download/pr/bea99127-a762-4f9e-aac8-542ad8aa9a94/afb5af074b879303b19c6069e9e8d75f/dotnet-sdk-3.1.100-osx-x64.tar.gz",
        "hash": "b38e6f8935d4b82b283d85c6b83cd24b5253730bab97e0e5e6f4c43e2b741aab",
    },
}
# Target framework moniker and runtime framework version matching the SDK above.
RUNTIME_TFM = "netcoreapp3.1"
RUNTIME_FRAMEWORK_VERSION = "3.1.0"
| 50.208333 | 179 | 0.751867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,003 | 0.832365 |
98385a3a1127e6e3d20efa2b8a3e4996af5d14c7 | 798 | py | Python | exibir_frame.py | rafaelblira/python-progressivo | fdd6ccf10d351d04158e03ff74dd99b431d94303 | [
"MIT"
] | null | null | null | exibir_frame.py | rafaelblira/python-progressivo | fdd6ccf10d351d04158e03ff74dd99b431d94303 | [
"MIT"
] | null | null | null | exibir_frame.py | rafaelblira/python-progressivo | fdd6ccf10d351d04158e03ff74dd99b431d94303 | [
"MIT"
] | null | null | null | from tkinter import *
class MinhaGUI:
    """Simple Tkinter GUI: a window with two stacked frames, each holding a label.

    Note: the constructor blocks in Tkinter's mainloop until the window closes.
    """
    def __init__(self):
        # Create the main window
        self.janela_principal = Tk()
        # Create the two frames
        self.frame_cima = Frame(self.janela_principal)
        self.frame_baixo = Frame(self.janela_principal)
        # Create the labels (display text intentionally left in Portuguese)
        self.label1 = Label(self.frame_cima, text='To no frame de cima!')
        self.label2 = Label(self.frame_baixo, text='To no frame de baixo!')
        # Place the labels inside their frames
        self.label1.pack(side='top')
        self.label2.pack(side='top')
        # Place the frames in the window (top frame first, bottom frame below)
        self.frame_cima.pack()
        self.frame_baixo.pack()
        # Enter the Tkinter event loop (blocks here)
        mainloop()
minha_gui = MinhaGUI() | 29.555556 | 75 | 0.605263 | 751 | 0.941103 | 0 | 0 | 0 | 0 | 0 | 0 | 222 | 0.278195 |
9838b9feb9100277e59803005d55c82663dd9651 | 1,435 | py | Python | cnc/migrations/0001_initial.py | andrewmallory/pan-cnc | bdd6349820107acf06b607c2dc0154ded468f049 | [
"Apache-2.0"
] | 3 | 2019-03-13T14:59:59.000Z | 2020-04-26T06:30:16.000Z | cnc/migrations/0001_initial.py | andrewmallory/pan-cnc | bdd6349820107acf06b607c2dc0154ded468f049 | [
"Apache-2.0"
] | 29 | 2019-02-05T00:01:32.000Z | 2021-03-22T14:10:07.000Z | cnc/migrations/0001_initial.py | andrewmallory/pan-cnc | bdd6349820107acf06b607c2dc0154ded468f049 | [
"Apache-2.0"
] | 2 | 2019-08-31T13:54:53.000Z | 2020-11-18T16:27:11.000Z | # Generated by Django 3.0.5 on 2020-05-26 13:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: creates the RepositoryDetails and Skillet models.

    Auto-generated by Django makemigrations (see header comment); avoid
    hand-editing beyond documentation.
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='RepositoryDetails',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('url', models.CharField(max_length=512)),
                # deploy key material for git access; note null='' is unusual
                # (truthy, so effectively null=True) — left as generated
                ('deploy_key_path', models.CharField(default='', max_length=128, null='')),
                ('deploy_key_priv', models.CharField(default='', max_length=2048, null='')),
                ('deploy_key_pub', models.CharField(default='', max_length=2048, null='')),
                ('details_json', models.TextField(max_length=2048)),
            ],
        ),
        migrations.CreateModel(
            name='Skillet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200, unique=True)),
                ('skillet_json', models.TextField(default='', max_length=2048)),
                # each Skillet belongs to one repository; deleted with it
                ('repository', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cnc.RepositoryDetails')),
            ],
        ),
    ]
| 38.783784 | 123 | 0.587456 | 1,309 | 0.912195 | 0 | 0 | 0 | 0 | 0 | 0 | 235 | 0.163763 |
983909b4ca0faeb47da87aa1e2787965d68182c6 | 29,296 | py | Python | util.py | awb-carleton/pattern-analysis | 532066398f2d102031aaa86b9a7c739ee16ceb9c | [
"MIT"
] | null | null | null | util.py | awb-carleton/pattern-analysis | 532066398f2d102031aaa86b9a7c739ee16ceb9c | [
"MIT"
] | null | null | null | util.py | awb-carleton/pattern-analysis | 532066398f2d102031aaa86b9a7c739ee16ceb9c | [
"MIT"
] | null | null | null | # coding: utf-8
import os, pickle, csv, json
import subprocess
from typing import NamedTuple, List, TextIO, Tuple, Dict, Optional, Union, Iterable, Hashable
import numpy as np
import pandas as pd
from scipy import stats
from itertools import product, groupby, takewhile
from collections import namedtuple, Counter
import multiprocessing
import logging
import string
import matplotlib
matplotlib.use("Agg")
# pids with missing data (i.e., pdbs missing for either sid, eid, and/or gid)
pids_missing_data = {
    '2000524', '2001234', '2001249', '2001255', '2001287', '2001291',
    '2001306', '2001308', '2001311', '2002239', '2002243', '2002247',
    '2002255', '2002713', '2002963', '2002990', '2002992', '2003008',
    '2003011', '2003015', '997529', '996023',
}
# pids whose data was never fetched
unfetched_pids = {
    '2000659', '2001302', '2002102', '2002465', '2002809', '2002833',
    '2002850', '2003001', '2003047', '2003059', '2003078', '2003126',
    '2003183', '996313', '996492', '996508', '997542', '997940',
    '998465', '998529', '998574',
}
# fetched, but corrupt
bad_pids = {
    '1998935', '2000659', '2001302', '2002102', '2002465', '2002809',
    '2002833', '2002850', '2003078', '2003126', '2003183', '2003763',
    '2003832', '997766',
}
# stopped early due to crashes or errors
stopped_pids = {'2003699', '2003183', '2002494', '2002247', '2002912', '2003801'}
# restarted version of stopped puzzle
restarted_pids = {'2003704', '2002499', '2002255', '2002914', '2003806'}
pids_missing_energies = {'996547'}
pids_missing_pdl_actions = {'998071', '1998729', '998219'}
# puzzles excluded from analysis: missing energies, missing pdl actions, or corrupt
skip_pids = pids_missing_energies.union(pids_missing_pdl_actions).union(bad_pids)
class EnergyComponent(NamedTuple):
    """One named, weighted component term of a solution's energy."""
    name: str
    weight: float
    energy: float
class PDB_Info(NamedTuple):
    """Record for one Foldit solution snapshot (pdb).

    NOTE(review): sid/pid/uid/gid presumably are solution/puzzle/user/group
    ids (cf. the 'pids with missing data' comment above) — confirm upstream.
    """
    sid: str
    pid: str
    uid: str
    gid: str
    sharing_gid: str
    scoretype: str
    pdl: Dict
    energy: float
    energy_components: List[EnergyComponent]
    timestamp: int
    parent_sid: Optional[str]
    tmscore: float
    deviations: np.ndarray
class SnapshotDelta(NamedTuple):
    """Difference (actions, macros, energy) between a snapshot and its parent."""
    sid: str
    parent_sid: Optional[str]
    timestamp: int
    action_diff: Counter
    macro_diff: Counter
    action_count: int
    energy_diff: float
class SolvingLineVariant(NamedTuple):
    """A variant of a solving line: total actions, elapsed time, member indices."""
    action_count: int
    time: int
    indices: List[int]
class SolvingLine(NamedTuple):
    """A soloist's line of descent: its snapshots, totals, and variants."""
    action_count: int
    time: int
    pdb_infos: List[PDB_Info]
    variants: List[SolvingLineVariant]

    @property
    def energies(self):
        # energies of each snapshot on the line, in order
        return [x.energy for x in self.pdb_infos]
class EvolvingLine(NamedTuple):
    """An evolver's line of descent, seeded from a shared source solution."""
    source: Dict
    pdb_infos: List[PDB_Info]

    @property
    def energies(self):
        # energies of each snapshot on the line, in order
        return [x.energy for x in self.pdb_infos]
class PuzzleMeta(NamedTuple):
    """Per-puzzle reference data: best tmscores, Pareto front, and baselines."""
    pid: str
    best_tmscores: Dict
    pfront: np.ndarray
    upload_baseline: float
    energy_baseline: float
    structure: Dict
class PatternInstance(NamedTuple):
    """An occurrence of pattern (cluster) cid by user uid on puzzle pid,
    spanning indices start_idx to end_idx of that user's series."""
    cid: int
    uid: str
    pid: str
    start_idx: int
    end_idx: int
class PatternInstanceExt(NamedTuple):
    """PatternInstance extended with the boundary and best pdbs around the span."""
    cid: int
    uid: str
    pid: str
    start_idx: int
    end_idx: int
    start_pdb: PDB_Info
    end_pdb: PDB_Info
    pre_best: PDB_Info
    post_best: PDB_Info
class SubPatternInstance(NamedTuple):
    """A labeled sub-pattern found inside a parent PatternInstance p."""
    p: PatternInstance
    label: str
    start_idx: int
    end_idx: int
class SubLookup(NamedTuple):
    """Nested lookup tables for sub-clustering (TICC) results, keyed as noted below."""
    clusters: Dict[str, Dict[int, Dict[int, Dict[int, np.ndarray]]]]  # (user to k to cid to sub_k to cluster labels)
    mrfs: Dict[str, Dict[int, Dict[int, Dict[int, Dict[int, np.ndarray]]]]]  # (user to k to cid to sub_k to mrf dictionary (cluster label to mrf))
    models: Dict[str, Dict[int, Dict[int, Dict[int, Dict]]]]  # (user to k to cid to sub_k to dict of ticc model parameters)
    bics: Dict[str, Dict[int, Dict[int, Dict[int, float]]]]  # (user to k to cid to sub_k to bic)
class SubSeriesLookup(NamedTuple):
    """Per-pattern series plus the concatenated series and index ranges into it."""
    patterns: Dict[Hashable, np.ndarray]  # e.g., (uid, pid, start index) -> series for that pattern
    series: np.ndarray
    idx_lookup: Dict[Hashable, Tuple[int, int]]  # pattern key -> (start, end) range in series
class SubclusterSeries(NamedTuple):
    """Sub-cluster labels paired with their series data."""
    labels: List[str]
    series: np.ndarray
# type aliases
# NOTE(review): nesting presumably mirrors SubLookup minus the user level,
# i.e. k -> cid -> sub_k -> ... — confirm at the use sites.
SubClusters = Dict[int, Dict[int, Dict[int, np.ndarray]]]
SubMRFs = Dict[int, Dict[int, Dict[int, Dict[int, np.ndarray]]]]
# pattern instances keyed either by a string (e.g. uid) or by two int levels
PatternLookup = Union[Dict[str, Iterable[PatternInstance]], Dict[int, Dict[int, Iterable[PatternInstance]]]]
@pd.api.extensions.register_series_accessor("foldit")
class FolditSeriesAccessor:
    """Custom `.foldit` accessor for a pandas Series carrying solving/evolving lines."""
    def __init__(self, pandas_obj: pd.Series):
        self._validate(pandas_obj)
        self._obj = pandas_obj

    @staticmethod
    def _validate(obj: pd.Series):
        # require the series to carry 'lines'/'evol_lines' entries, or to BE
        # one of those columns itself (comment previously misdescribed this
        # as a latitude/longitude check, copied from the pandas docs example)
        if ('lines' not in obj.index or 'evol_lines' not in obj.index) and (obj.name != "lines" and obj.name != "evol_lines"):
            raise AttributeError("Must have 'lines' and 'evol_lines'.")

    @property
    def solo_pdbs(self):
        # all pdbs across solo solving lines (empty list when lines is falsy)
        return [p for l in self._obj.lines for p in l.pdb_infos] if self._obj.lines else []

    @property
    def evol_pdbs(self):
        # all pdbs across evolving lines (empty list when evol_lines is falsy)
        return [p for l in self._obj.evol_lines for p in l.pdb_infos] if self._obj.evol_lines else []

    @property
    def solo_energies(self):
        return [p.energy for p in self._obj.foldit.solo_pdbs]

    @property
    def evol_energies(self):
        return [p.energy for p in self._obj.foldit.evol_pdbs]
@pd.api.extensions.register_dataframe_accessor("foldit")
class FolditAccessor:
    """Custom `.foldit` accessor for a DataFrame with 'lines'/'evol_lines' columns.

    Row-wise application of the Series accessor above.
    """
    def __init__(self, pandas_obj: pd.Series):
        self._validate(pandas_obj)
        self._obj = pandas_obj

    @staticmethod
    def _validate(obj: pd.Series):
        # require both columns (comment previously misdescribed this as a
        # latitude/longitude check, copied from the pandas docs example)
        if 'lines' not in obj.columns or 'evol_lines' not in obj.columns:
            raise AttributeError("Must have 'lines' and 'evol_lines'.")

    @property
    def solo_pdbs(self):
        return self._obj.apply(lambda r: r.foldit.solo_pdbs, axis=1)

    @property
    def evol_pdbs(self):
        return self._obj.apply(lambda r: r.foldit.evol_pdbs, axis=1)

    @property
    def solo_energies(self):
        return self._obj.apply(lambda r: r.foldit.solo_energies, axis=1)

    @property
    def evol_energies(self):
        return self._obj.apply(lambda r: r.foldit.evol_energies, axis=1)
# @property
# def pdbs(self):
# sentinel id — presumably marks the root node of a solution tree; confirm at use sites
ROOT_NID = ('00000000-0000-0000-0000-000000000000', 0)
# human-readable puzzle category names mapped to Foldit category node ids
category_lookup = {
    'overall': '992758',
    'beginner': '992759',
    'prediction': '992760',
    'design': '992761',
    'electron': '994237',
    'contacts': '997946',
    'symmetry': '992769',
    'casp10': '992762',
    'casp11': '997398',
    'casp_roll': '993715',
    'hand_folding': '994890',
    'small_molecule_design': '2002074',
    "pilot": "2004148",
    'all': 'all',  # dummy to allow select of all categorized puzzles
}
# Foldit tool/action names grouped by interaction style
action_types = {
    'optimize': {'ActionGlobalMinimize', 'ActionGlobalMinimizeBackbone', 'ActionGlobalMinimizeSidechains',
                 'ActionLocalMinimize', 'ActionRepack'},
    'hybrid': {'ActionLocalMinimizePull', 'LoopHash', 'ActionBuild', 'ActionPullSidechain', 'ActionTweak',
               'ActionRebuild'},
    'manual': {'ActionSetPhiPsi', 'ActionJumpWidget', 'ActionRotamerCycle', 'ActionRotamerSelect'},
    'guiding': {'ActionInsertCut', 'ActionLockToggle', 'ActionCopyToggle', 'ActionSecStructAssignHelix',
                'ActionSecStructAssignLoop', 'ActionSecStructAssignSheet', 'ActionSecStructDSSP', 'ActionSecStructDrag',
                'ActionBandAddAtomAtom', 'ActionBandAddDrag', 'ActionBandAddResRes', 'ActionBandDrag',
                'ActionBandLength', 'ActionBandStrength'},
}
# 'deliberate' = everything except the pure optimize actions
action_types['deliberate'] = action_types['hybrid'].union(action_types['manual']).union(action_types['guiding'])
def rmse(predictions, targets):
    """Root-mean-square error between two equally-shaped arrays."""
    squared_errors = (predictions - targets) ** 2
    return np.sqrt(np.mean(squared_errors))
def iden(x):
    """Identity function: return the argument unchanged (useful as a default key/transform)."""
    return x
def get_ranks(datafile):
    """Parse a puzzle-ranks CSV into a dict keyed by pid.

    Each puzzle dict holds 'groups', 'soloists', and 'evolvers' — each sorted
    descending by score and annotated with rank / norm_rank — plus an empty
    'categories' list filled in later by get_ranks_labeled.

    :param datafile: path prefix of the CSV file (".csv" is appended)
    """
    puzzles = {}
    with open("{}.csv".format(datafile)) as fp:
        ranks_in = csv.DictReader(fp)
        for row in ranks_in:
            # keep the raw (negative-is-better) energy; rescale best_score so
            # higher is better and floor it at zero
            row['energy'] = float(row['best_score'])
            row['best_score'] = max(float(row['best_score']) * -10 + 8000, 0)
            pid = row['pid']
            if pid not in puzzles:
                puzzles[pid] = {
                    'groups': {},
                    'soloists': [],
                    'evolvers': [],
                    'categories': []
                }
            if row['gid'] == '0':
                row['gid'] = 'NULL'  # no sense in having both 0 and NULL for no group
            gid = row['gid']
            if gid != 'NULL':
                gs = puzzles[pid]['groups']
                if gid not in gs:
                    gs[gid] = {
                        'score': row['best_score'],
                        'type': row['type'],
                        'gid': gid,
                        'uid': row['uid'],
                    }
                # track the best-scoring row seen for this group
                if gs[gid]['score'] < row['best_score']:
                    gs[gid]['score'] = row['best_score']
                    gs[gid]['type'] = row['type']
                    gs[gid]['uid'] = row['uid']
            # type '1' rows are soloists, type '2' rows are evolvers
            if row['type'] == '1':
                puzzles[pid]['soloists'].append(row)
            if row['type'] == '2':
                puzzles[pid]['evolvers'].append(row)
    for pid in puzzles:
        p = puzzles[pid]
        p['groups'] = list(p['groups'].values())
        # reverse sorts to put them in descending order (top ranked should be first)
        p['groups'].sort(key=lambda x: x['score'], reverse=True)
        for i, g in enumerate(p['groups']):
            g['rank'] = i
            g['norm_rank'] = i / len(p['groups'])
        p['soloists'].sort(key=lambda x: x['best_score'], reverse=True)
        for i, s in enumerate(p['soloists']):
            s['rank'] = i
            s['norm_rank'] = i / len(p['soloists'])
        p['evolvers'].sort(key=lambda x: x['best_score'], reverse=True)
        for i, e in enumerate(p['evolvers']):
            e['rank'] = i
            e['norm_rank'] = i / len(p['evolvers'])
    return puzzles
def get_ranks_labeled():
    """Load puzzle ranks and attach category lists plus title/description labels.

    Merges data/puzzle_categories_latest.csv and data/puzzle_labels_latest.json
    into the dict returned by get_ranks.
    """
    puzzles = get_ranks("data/rprp_puzzle_ranks_latest")
    with open("data/puzzle_categories_latest.csv") as cat_fp:
        for row in csv.DictReader(cat_fp):
            puzzle = puzzles.get(row['nid'])
            if puzzle is not None:
                puzzle['categories'] = row['categories'].split(',')
                # every categorized puzzle also belongs to the dummy 'all' category
                puzzle['categories'].append('all')
    with open("data/puzzle_labels_latest.json") as lab_fp:
        labels = json.load(lab_fp)
    for row in labels:
        puzzle = puzzles.get(row['pid'])
        if puzzle is not None:
            assert row['title'] is not None
            puzzle['title'] = row['title']
            if row['desc'] is not None:
                puzzle['desc'] = row['desc']
    return puzzles
def add_pdbs_to_ranks(puzzles):
    """Attach top-scoring pdb records to each puzzle, in place.

    Puzzles with missing/unfetched data, or with no pdbs in the pickle, are
    removed from `puzzles`. NOTE(review): itertools.groupby assumes the
    pickled pdb list is already ordered by PID — confirm upstream.
    """
    print("loading pdbs")
    with open("data/top_pdbs.pickle", 'rb') as pdb_fp:
        pdbs = pickle.load(pdb_fp)
    # keep only records that have a puzzle id and a non-empty PDL
    pdbs = [p for p in pdbs if 'PID' in p and len(p['PDL']) > 0]
    print("grouping pdbs")
    pdbs_by_pid = {pid: list(group) for pid, group in groupby(pdbs, lambda p: p['PID'])}
    # drop puzzles known to be missing data or never fetched
    for pid in pids_missing_data.union(unfetched_pids):
        if pid in puzzles:
            puzzles.pop(pid)
    # drop puzzles for which we have no pdbs at all
    for pid in list(puzzles):
        if pid not in pdbs_by_pid:
            puzzles.pop(pid)
    for pid, group in pdbs_by_pid.items():
        if pid in puzzles:
            puzzles[pid]['pdbs'] = group
def sig_test(a, b, fstr="{} (n={}) {} (n={})", normal=False, thresholds=None):
    """Compare two samples, printing a significance test and effect size.

    Uses Welch's t-test when normal=True, otherwise a two-sided Mann-Whitney
    U test, and reports the rank-biserial correlation as effect size.

    :param a, b: the two samples (sequences of numbers)
    :param fstr: format string for the two printed sample summaries
    :param normal: if True use Welch's t-test instead of Mann-Whitney U
    :param thresholds: optional mapping with keys 'p' (max p-value) and/or 'r'
        (min absolute effect size); results outside the thresholds are computed
        and returned but not printed. Defaults to no filtering. (The previous
        default, an empty frozenset, was the wrong type for the thresholds['p']
        lookups below and only worked because the membership tests
        short-circuited first.)
    :returns: (p-value, rank-biserial correlation f - u)
    """
    if thresholds is None:
        thresholds = {}
    if normal:
        t, p = stats.ttest_ind(a, b, equal_var=False)
    else:
        U2, p = stats.mannwhitneyu(np.array(a), np.array(b), use_continuity=True, alternative='two-sided')
        U = min(U2, len(a) * len(b) - U2)
    # rank-biserial correlation: P(a > b) - P(a < b) over all cross pairs
    N = len(a) * len(b)
    f = len(list(filter(lambda xy: xy[0] > xy[1], product(a, b)))) / N
    u = len(list(filter(lambda xy: xy[0] < xy[1], product(a, b)))) / N
    if ('p' not in thresholds or p < thresholds['p']) and ('r' not in thresholds or abs(f - u) > thresholds['r']):
        print(fstr.format("mean={:.6f}, median={:.6f}, std={:.6f}".format(np.mean(a), np.median(a), np.std(a)), len(a),
                          "mean={:.6f}, median={:.6f}, std={:.6f}".format(np.mean(b), np.median(b), np.std(b)), len(b)))
        if normal:
            print("test statistic t: {:.6f}".format(t))
        else:
            print("Mann Whitney U: {:.6f}".format(U))
        print("significance (two-tailed): {:.6f}".format(p))
        print("rank-biserial correlation: {:.3f}".format(f - u))
    return p, f - u
def get_atoms(pdb):
    """Parse a pdb record's 'ca' string into an (n, 3) array of C-alpha coordinates.

    The string is a comma-separated list of quoted, bracketed triples, e.g.
    '"[x y z]", ...'. A spurious all-zero atom sometimes appears at the end
    of the list; when present it is dropped.
    """
    coords = [[float(value) for value in entry.strip(' "[]').split(" ")]
              for entry in pdb['ca'].split(",")]
    if all(component == 0 for component in coords[-1]):
        # drop the spurious trailing atom at the origin
        coords = coords[:-1]
    return np.array(coords)
def rmsd(X, Y):
    """Optimally superpose X onto Y (Kabsch algorithm) and measure the residual.

    Returns (sqrt of the summed squared deviations, per-atom deviations)
    after centering both structures and applying the optimal rotation.
    """
    # center each structure on its centroid
    Xc = X - X.mean(axis=0)
    Yc = Y - Y.mean(axis=0)
    # optimal rotation from the SVD of the covariance matrix
    covariance = np.dot(Xc.T, Yc)
    V, S, Wt = np.linalg.svd(covariance)
    # correct an improper rotation (reflection) if the determinant is negative
    if (np.linalg.det(V) * np.linalg.det(Wt)) < 0.0:
        S[-1] = -S[-1]
        V[:, -1] = -V[:, -1]
    rotation = np.dot(V, Wt)
    aligned = np.dot(Xc, rotation)
    deviations = np.linalg.norm(aligned - Yc, axis=1)
    return (deviations ** 2).sum() ** 0.5, deviations
# https://github.com/charnley/rmsd
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC1471868/
# https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4321859/
def weighted_rmsd(X, Y, p=50):
    """Iteratively-reweighted Kabsch superposition of X onto Y.

    Atoms that align well receive exponentially larger weight on the next
    iteration (scale set by the p-th percentile of deviations), so the fit
    emphasizes the well-matching core of the two structures.

    :param X, Y: coordinate arrays of matching shape (n atoms x 3)
    :param p: percentile of the deviations used to scale the weights
    :returns: (weighted rmsd, final per-atom weights, final per-atom deviations)
    """
    weights = np.array([[1]] * len(Y))
    wrmsd = 0
    wrmsd_old = float('inf')
    i = 0
    # there may be rare cases where this doesn't converge, so limit to 1000 iterations just in case
    while abs(wrmsd - wrmsd_old) > 1e-6 and i < 1000:
        i += 1
        wrmsd_old = wrmsd
        # weighted center of mass
        # NOTE(review): this is mean(weights * X), not the usual
        # sum(weights * X) / sum(weights) — confirm this is intentional
        X = X - (weights * X).mean(axis=0)
        Y = Y - (weights * Y).mean(axis=0)
        # weighted covariance matrix
        R = np.dot(X.T, weights * Y)
        V, S, Wt = np.linalg.svd(R)
        # correct an improper rotation (reflection)
        d = (np.linalg.det(V) * np.linalg.det(Wt)) < 0.0
        if d:
            S[-1] = -S[-1]
            V[:, -1] = -V[:, -1]
        U = np.dot(V, Wt)
        Xp = np.dot(X, U)
        deviations = np.linalg.norm(Xp - Y, axis=1)
        wrmsd = ((weights.flatten() * deviations ** 2).sum() / weights.sum()) ** 0.5
        # re-derive weights: well-fitting atoms (small deviation) get weight near 1,
        # poorly-fitting atoms decay exponentially relative to the p-th percentile
        dp = np.percentile(deviations, p)
        weights = np.exp(-deviations ** 2 / dp ** 2).reshape((len(deviations), 1))
    return wrmsd, weights, deviations
def expand_seed(i, timestamps, break_threshold=900):
    """Grow a session forward from index i in a list of Unix-style timestamps.

    Returns (start, end) indices of the period containing i with no gaps of
    break_threshold seconds or larger. Only expands forward from i.

    :param i: seed index
    :param timestamps: list of Unix-style timestamps (seconds)
    :param break_threshold: gap size in seconds that ends a session
    """
    start = i
    end = i
    candidate = end + 1
    while candidate < len(timestamps) and timestamps[candidate] - timestamps[end] < break_threshold:
        end = candidate
        candidate += 1
    return start, end
# takes in list of Unix-style timestamps
def get_sessions(timestamps):
    """Partition the timestamps into (start, end) index pairs of contiguous sessions."""
    sessions = []
    seed = 0
    while seed < len(timestamps):
        span = expand_seed(seed, timestamps)
        sessions.append(span)
        # next session starts right after the one just found
        seed = span[1] + 1
    return sessions
def time_splits_helper(timestamps, chunk, splits):
    """Greedily cut `timestamps` into up to `splits` slices of roughly
    `chunk` seconds of *active* time each.

    Whole sessions are assigned to the current slice while they fit; a
    session is split mid-way when that lands closer to the target chunk
    size than stopping before it.  Returns (ret, times) where ret is a
    list of half-open (start_idx, end_idx) index pairs and times holds
    the active seconds actually assigned to each slice.
    """
    sessions = get_sessions(timestamps)
    start_idx = end_idx = 0
    time_left = chunk
    session_idx = 0
    ret = []
    times = []
    for i in range(splits):
        logging.debug('split {}'.format(i))
        while time_left > 0 and session_idx < len(sessions):
            logging.debug('time left {}'.format(time_left))
            ses = sessions[session_idx]
            session_start, session_end = ses
            if session_duration(ses, timestamps) <= time_left:
                # whole session fits into the current slice
                logging.debug('session {} {} fits'.format(session_idx, sessions[session_idx]))
                end_idx = session_end
                time_left -= session_duration(ses, timestamps)
                session_idx += 1
                if session_idx == len(sessions):
                    # out of sessions: close the final slice at the very end
                    logging.debug('adding {} to the end'.format(start_idx))
                    ret.append((start_idx, len(timestamps)))
                    times.append(sum(session_duration((s, e), timestamps) for s, e in sessions if s >= start_idx))
                    logging.debug('time: {}'.format(times[-1]))
            else:
                ns, ne = sessions[session_idx]
                # smallest amount of time the next session could contribute
                minimal_addition = session_duration((ns, ne), timestamps) if ns == ne else timestamps[ns + 1] - \
                    timestamps[ns]
                # the minimum we could add to the current split would put us further away than we currently are
                if abs(time_left - minimal_addition) > abs(time_left):
                    times.append(session_duration((session_start, end_idx), timestamps) + sum(
                        session_duration((s, e), timestamps) for s, e in sessions if
                        s >= start_idx and e < session_start))
                    if start_idx == end_idx:
                        # never emit an empty slice
                        end_idx += 1
                    logging.debug("close as we can get, adding {} up to {}".format(start_idx, end_idx))
                    ret.append((start_idx, end_idx))
                    logging.debug('time: {}'.format(times[-1]))
                    start_idx = end_idx
                    time_left = 0
                else:
                    if session_start == session_end:
                        end_idx = session_end
                    else:
                        # split the session: advance until the slice is full,
                        # then back off one step if that lands closer to target
                        end_idx = session_start + 1
                        while session_duration((session_start, end_idx), timestamps) < time_left:
                            end_idx += 1
                        if abs(time_left - (timestamps[end_idx] - timestamps[session_start])) > abs(
                                time_left - (
                                timestamps[end_idx - 1] - timestamps[session_start])) and end_idx > start_idx + 1:
                            end_idx -= 1
                        logging.debug('splitting session at {}'.format(end_idx))
                        # remainder of the session is pushed back for the next slice
                        sessions[session_idx] = (end_idx, session_end)
                    times.append(session_duration((session_start, end_idx), timestamps) + sum(
                        session_duration((s, e), timestamps) for s, e in sessions if s >= start_idx and e < session_start))
                    if start_idx == end_idx:
                        end_idx += 1
                    logging.debug('adding {} up to {}'.format(start_idx, end_idx))
                    ret.append((start_idx, end_idx))
                    logging.debug('time: {}'.format(times[-1]))
                    start_idx = end_idx
                    time_left = 0
        # reset the time budget for the next slice
        time_left = chunk
    return ret, times
def get_time_splits(time, timestamps, splits):
    """Split `timestamps` into `splits` contiguous slices of roughly equal
    active time (`time` / `splits` seconds each).

    Retries with a shrinking chunk size when the greedy helper comes up
    short, shifts indices to manufacture a missing final slice, and
    extends the last slice to cover trailing timestamps.  Returns a list
    of half-open (start, end) index pairs covering all of `timestamps`.
    Internal assertions sanity-check contiguity and coverage.
    """
    chunk = time / splits
    ret, times = time_splits_helper(timestamps, chunk, splits)
    # degenerate chunk sizes can leave us a slice short; retry smaller
    while len(ret) < splits - 1 and len(timestamps) >= 2 * splits:
        chunk *= 0.9
        logging.debug("bad split possibly due to degenerate chunk size, trying with {}".format(chunk))
        ret, times = time_splits_helper(timestamps, chunk, splits)
    if len(ret) == splits - 1 and any(e - s > 0 for s, e in ret):
        # one slice short: steal an index from the largest multi-index slice
        idx = np.argmax([t if s != e else 0 for (s, e), t in zip(ret, times)])
        shifted = ret[:idx] + [(ret[idx][0], ret[idx][1] - 1)] + [(s - 1, e - 1) for s, e in ret[idx + 1:]] + [
            (ret[-1][1] - 1, ret[-1][1])]
        logging.debug("short one slice, shifting everything to make another ({} to {})".format(ret, shifted))
        ret = shifted
    if ret[-1][1] < len(timestamps):
        logging.debug("extending final slice to the end ({} to {})".format(ret[-1][1], len(timestamps)))
        ret = ret[:-1] + [(ret[-1][0], len(timestamps))]
    assert len(ret) == splits or len(timestamps) < 2 * splits, "{} -- wanted {} splits, got {}".format(ret, splits,
                                                                                                      len(ret))
    # assert len(ret) == splits or splits > 12, "{} -- wanted {} splits, got {}".format(ret, splits, len(ret))
    assert all(e1 == s2 for (s1, e1), (s2, e2) in zip(ret, ret[1:]))
    covered_timestamps = np.concatenate([np.arange(s, e) for s, e in ret])
    assert all(x in covered_timestamps for x in range(len(timestamps))), \
        "{} -- not all timestamp indices accounted for: {}".format(ret, [x for x in range(len(timestamps)) if
                                                                         x not in covered_timestamps])
    # allowed_deviation = max([max(np.diff(timestamps[s:e]), default=0) for s, e in get_sessions(timestamps) if s != e], default=300) / 2
    # assert all(abs(t - chunk) < max(allowed_deviation, chunk * 0.1) for t in times[:-1]) or len(timestamps) <= 2 * splits, \
    #     "{} -- splits deviate too far from target size ({} ± {}): {}".format(ret, chunk, max(allowed_deviation, chunk * 0.1), times)
    return ret
def session_duration(session, timestamps):
    """Length in seconds of an inclusive (start, end) session index pair."""
    start, end = session
    if start == end:
        # assume 5 minutes of work where we have just a single snapshot
        # for the session, based on standard upload rate
        return 300
    return timestamps[end] - timestamps[start]
def time_played(timestamps):
    """Total seconds of activity summed over all sessions in timestamps."""
    total = 0
    for ses in get_sessions(timestamps):
        total += session_duration(ses, timestamps)
    return total
def align_timestamps(timestamps):
    """Collapse every inter-session gap to exactly 900 seconds.

    Each later session is shifted so it starts 900 s after the previous
    session ends, preserving all intra-session spacing.  `timestamps`
    must be a numpy array (it is sliced and concatenated); a new array
    is returned.
    """
    bounds = get_sessions(timestamps)
    aligned = timestamps
    for (_, prev_end), (next_start, _) in zip(bounds, bounds[1:]):
        gap = aligned[next_start] - aligned[prev_end]
        tail = aligned[next_start:] - gap + 900
        aligned = np.concatenate((aligned[:next_start], tail))
    assert (np.diff(aligned) <= 900).all()
    return aligned
def get_children(nid, history):
    """Return the child node ids of nid in the solution history.

    nid is a (uuid, count) pair.  The continuation of the same solution
    line (same uuid, smallest later count) comes first, followed by
    branches: records with a different uuid whose parent_count matches.
    """
    uuid, count = nid
    records = history.get(nid, [])
    same_line = [r for r in records if r['uuid'] == uuid]
    if not same_line and count != 0:
        # no direct continuation recorded here; look for later counts
        # under this uuid's root entry
        same_line = [r for r in history[(uuid, 0)]
                     if r['uuid'] == uuid and r['count'] > count]
    branches = [r for r in records
                if r['uuid'] != uuid and r['parent_count'] == count]
    children = []
    if same_line:
        first = min(same_line, key=lambda r: r['count'])
        children.append((first['uuid'], first['count']))
    children.extend((r['uuid'], r['count']) for r in branches)
    return children
def get_nid(s):
    """Node id of a solution record: a (uuid, count) pair with count as int."""
    return s['uuid'], int(s['count'])
def output_atoms(atoms: np.ndarray, fp: TextIO) -> None:
    """Write one PDB-style ATOM record per C-alpha coordinate in *atoms*."""
    for serial, ca in enumerate(atoms, start=1):
        record = "ATOM {:6d} CA XXX A {:3d} {:8.3f}{:8.3f}{:8.3f} 1.00 0.00\n".format(serial, serial, *ca)
        fp.write(record)
def tmscore(pairs: List[Tuple[str, str]], tmp_input_name: str, atoms_lookup: Dict) -> Dict:
    """Batch-compute TM-scores for (solution id, solution id) pairs.

    Writes each solution's C-alpha atoms under tmp_input_name, shards
    the pairs into batch input files, runs ./tmscore_batch.zsh on each
    shard in parallel subprocesses, and parses the scores back.
    Cleans tmp_input_name afterwards via rsync against an empty dir.

    Returns a dict mapping each (a, b) pair to its float score.
    """
    if len(pairs) == 0:
        return {}
    logging.debug("{}: batch computing {} tmscores".format(tmp_input_name, len(pairs)))
    if not os.path.exists(tmp_input_name):
        os.makedirs(tmp_input_name)
    # write the necessary atom files
    sids = {s for ss in pairs for s in ss}
    for sid in sids:
        with open("{}/{}.atoms".format(tmp_input_name, sid), 'w') as fp:
            output_atoms(atoms_lookup[sid], fp)
    # empirically derived formula for chunksize to equalize batch time and spawning time
    # based on estimates that batches run 100 scores in ~1.5s, and Python starts ~6 batches per second
    chunksize = max(100, (len(pairs) / 0.09) ** 0.5)
    if len(pairs) // chunksize > (multiprocessing.cpu_count() / 4):
        chunksize = len(pairs) / (
            multiprocessing.cpu_count() / 4)  # avoid spawning huge numbers of batches as this kills the performance
    splits = np.array_split(pairs, len(pairs) // chunksize if len(pairs) > chunksize else 1)
    ps = []
    for i, split in enumerate(splits):
        input_name = "{}/{}.tmscore_input".format(tmp_input_name, i)
        with open(input_name, 'w') as fp:
            for a, b in split:
                fp.write("{} {}\n".format(a, b))
        # launch all batches before collecting any output
        ps.append((subprocess.Popen(['./tmscore_batch.zsh', input_name], stdout=subprocess.PIPE, encoding='utf-8'),
                   input_name))
    scores = []
    for p, fname in ps:
        # each output line is expected to be "<id_a> <id_b> <score>"
        scores.extend([s.split() for s in p.communicate()[0].splitlines()])
        subprocess.run(['rm', fname])
    subprocess.run(["rsync", "-a", "--delete", "tmp_data/empty_dir/", "{}/".format(tmp_input_name)])
    return {(a, b): float(s) for a, b, s in scores}
def get_overlap(segment, target):
    """Project the sessions of *target* onto the time span of *segment*.

    Both arguments are timestamp sequences.  For each session in target
    that overlaps (in wall-clock time) at least one session of segment,
    the session's bounds are trimmed to the overlapping part; the trimmed
    inclusive (start, end) index pairs into *target* are returned.
    """
    seg_sessions = get_sessions(segment)
    tar_sessions = get_sessions(target)
    tar_adj = []
    for s, e in tar_sessions:
        # candidate segment sessions whose time range intersects [target[s], target[e]]
        cands = [ses for ses in seg_sessions if target[s] < segment[ses[1]] and target[e] > segment[ses[0]]]
        if len(cands) > 0:
            # advance start until it falls inside some candidate session
            # NOTE(review): termination relies on the overlap test above;
            # the commented-out asserts suggest edge cases were observed -- confirm
            start = s
            while all(target[start] < segment[cs] for cs, ce in cands):
                start += 1
            # assert start < e
            # pull end back until it falls inside some candidate session
            end = e
            while all(target[end] > segment[ce] for cs, ce in cands):
                end -= 1
            # assert end >= start
            if start <= end:
                tar_adj.append((start, end))
    return tar_adj
def load_frame(datafile):
    """Load one puzzle's data bundle from an HDF5 file.

    Returns (df, bts, puz): the main solutions frame, the 'bts' frame,
    and the puzzle record stored under key 'puz'.
    """
    df = pd.read_hdf(datafile, 'df')
    bts = pd.read_hdf(datafile, 'bts')
    puz = pd.read_hdf(datafile, 'puz').iloc[0]  # tuple gets wrapped in a pandas data structure, so unwrap it here
    logging.debug(datafile)
    return df, bts, puz
def collect_pdl_entries(soln):
    """Aggregate action and macro counts from the trailing PDL entries
    that belong to soln's own user.

    The PDL is walked newest-first and stops at the first entry recorded
    by a different user.  Returns (actions, macros) count dicts.
    """
    actions = {}
    macros = {}
    for entry in takewhile(lambda e: e['header']['uid'] == soln.uid,
                           reversed(soln.pdl)):
        for name, cnt in entry['actions'].items():
            actions[name] = actions.get(name, 0) + cnt
        for name, cnt in entry['macros'].items():
            macros[name] = macros.get(name, 0) + cnt
    return actions, macros
def get_data_value(uid, pid, key, data):
    """Value of column *key* on the first row of *data* matching uid and pid."""
    mask = (data.uid == uid) & (data.pid == pid)
    return data.loc[mask, key].iloc[0]
def get_action_labels():
    """Short human-readable labels, index-aligned with get_action_keys()."""
    return [
        'band', 'build', 'cut', 'global_min', 'idealize', 'local_min',
        'lock', 'rebuild', 'repack', 'assign_loop', 'save', 'reset',
        'ss_load', 'ss_save',
    ]
def get_action_keys():
    """Return the action-name sets for each action category.

    index: action type
    0: banding
    1: build
    2: cuts
    3: global minimize
    4: idealize
    5: local minimize
    6: locking
    7: rebuild
    8: repack
    9: assign secondary structure loop
    10: quicksave
    11: reset recent best
    12: load secondary structure
    13: save secondary structure
    """
    # (the original docstring listed index 12 twice; save-secondary-structure
    # is the 14th entry, index 13)
    actionset_band = {'ActionBandAddAtomAtom',
                      'ActionBandAddDrag',
                      'ActionBandAddResRes',
                      'ActionBandDrag',
                      'ActionBandLength',
                      'ActionBandStrength',
                      'ActionBandDelete',
                      'ActionBandDisableToggle'}
    actionset_cut = {'ActionDeleteCut',
                     'ActionInsertCut'}
    actionset_global = {'ActionGlobalMinimize',
                        'ActionGlobalMinimizeBackbone',
                        'ActionGlobalMinimizeSidechains'}
    actionset_save = {'ActionStandaloneQuicksave', 'ActionNoviceQuicksave'}
    actionset_load = {'ActionStandaloneResetRecentBest', 'ActionNoviceResetRecentBest'}
    actionset_ss_save = {'ActionStandaloneSecstructSave', 'ActionNoviceSecstructSave'}
    actionset_ss_load = {'ActionStandaloneSecstructLoad', 'ActionNoviceSecstructLoad'}
    return [actionset_band, {'ActionBuild'}, actionset_cut, actionset_global, {'ActionIdealize'},
            {'ActionLocalMinimize'},
            {'ActionLockToggle'}, {'ActionRebuild'}, {'ActionRepack'}, {'ActionSecStructAssignLoop'}, actionset_save,
            actionset_load, actionset_ss_load, actionset_ss_save]
def get_action_stream(action_diff: Counter):
    """Collapse a per-action Counter into the fixed-order category totals
    defined by get_action_keys()."""
    return [sum(action_diff.get(name, 0) for name in group)
            for group in get_action_keys()]
def get_pattern_label(p, cid, sub_k):
    """Label for pattern *p* within cluster *cid*.

    The top-level pattern of a cluster (sub_k == 0) is labelled with the
    cluster id alone; sub-patterns append an uppercase letter keyed by
    the pattern's own cid.
    """
    label = str(cid)
    if sub_k == 0:
        assert p.cid == cid
        return label
    return label + string.ascii_uppercase[p.cid]
| 36.528678 | 151 | 0.563183 | 4,387 | 0.149742 | 0 | 0 | 2,115 | 0.072192 | 0 | 0 | 7,239 | 0.24709 |
98398db197820a44e5b1908f15ab56caa4e45c32 | 15,604 | py | Python | panaroo/merge_graphs.py | chrisruis/panaroo | e0ba5db1681134de7f9e483c97af7673e69d5498 | [
"MIT"
] | null | null | null | panaroo/merge_graphs.py | chrisruis/panaroo | e0ba5db1681134de7f9e483c97af7673e69d5498 | [
"MIT"
] | null | null | null | panaroo/merge_graphs.py | chrisruis/panaroo | e0ba5db1681134de7f9e483c97af7673e69d5498 | [
"MIT"
] | null | null | null | import os
import tempfile
import shutil
import argparse
import networkx as nx
from tqdm import tqdm
from joblib import Parallel, delayed
from collections import defaultdict, Counter
import math
import numpy as np
from .isvalid import *
from .__init__ import __version__
from .cdhit import run_cdhit
from .clean_network import collapse_families, collapse_paralogs
from .generate_output import *
def make_list(inp):
    """Wrap *inp* in a list unless it already is one.

    Graph attributes read back from GML may be scalars (single member)
    or lists; this normalises them for uniform iteration.
    """
    # isinstance (rather than an exact type() comparison) also accepts
    # list subclasses, which would otherwise be double-wrapped
    if not isinstance(inp, list):
        inp = [inp]
    return inp
def load_graphs(graph_files, n_cpu=1):
    """Load Panaroo GML graphs from *graph_files*.

    Validates that every file exists before loading any of them, so all
    missing inputs are reported up front.  *n_cpu* is kept for interface
    compatibility but loading is currently sequential.

    Raises:
        RuntimeError: if any graph file is missing.
    """
    # (the original built an empty `graphs` list that was immediately
    # discarded and re-created; validate first, then load once)
    missing = [f for f in graph_files if not os.path.isfile(f)]
    if missing:
        for graph_file in missing:
            print("Missing:", graph_file)
        raise RuntimeError("Missing graph file!")
    return [nx.read_gml(graph_file) for graph_file in tqdm(graph_files)]
def cluster_centroids(graphs,
                      outdir,
                      len_dif_percent=0.95,
                      identity_threshold=0.95,
                      n_cpu=1):
    """Cluster the centroid protein sequences of all graph nodes with cd-hit.

    Writes each node's longest stop-codon-free protein sequence to a
    temporary fasta file (headers are "<graph index>_<node name>"), runs
    cd-hit, and parses the resulting .clstr file.

    Returns:
        A list of clusters; each cluster is a list of
        (graph_index, node_name) tuples that co-clustered.
    """
    # create the files we will need
    temp_input_file = tempfile.NamedTemporaryFile(delete=False, dir=outdir)
    temp_input_file.close()
    temp_output_file = tempfile.NamedTemporaryFile(delete=False, dir=outdir)
    temp_output_file.close()
    # create input for cdhit
    with open(temp_input_file.name, 'w') as outfile:
        for i, G in enumerate(graphs):
            for node in G.nodes():
                outfile.write(">" + str(i) + "_" + node + '\n')
                # use the longest sequence without an internal stop codon
                seqs = G.nodes[node]["protein"].split(";")
                seqs = [s for s in seqs if "*" not in s]
                outfile.write(max(seqs, key=len) + "\n")
    # Run cd-hit
    run_cdhit(temp_input_file.name,
              temp_output_file.name,
              id=identity_threshold,
              s=len_dif_percent,
              accurate=True,
              n_cpu=n_cpu)
    # Process output.  ('rU' mode was removed in Python 3.11; 'r' with
    # universal newlines is the default and equivalent.)
    clusters = []
    with open(temp_output_file.name + ".clstr", 'r') as infile:
        c = []
        for line in infile:
            if line[0] == ">":
                # a ">Cluster N" header starts a new cluster
                clusters.append(c)
                c = []
            else:
                # member lines look like: "0  123aa, >3_nodeName... *"
                # split the graph index off on the FIRST underscore only,
                # so node names containing underscores stay intact
                temp = line.split(">")[1].split("...")[0].split("_", 1)
                centroid = (int(temp[0]), temp[1])
                c.append(centroid)
        clusters.append(c)
    # drop the empty cluster appended before the first header
    clusters = clusters[1:]
    # remove temporary files
    os.remove(temp_input_file.name)
    os.remove(temp_output_file.name)
    os.remove(temp_output_file.name + ".clstr")
    # TODO: new centroids may still need to be added here (original note)
    return clusters
def simple_merge_graphs(graphs, clusters):
    """Merge pre-clustered Panaroo graphs into a single graph.

    Nodes of the input graphs are relabelled to shared integer ids
    according to *clusters* (lists of (graph index, node name) pairs);
    within a cluster, multiple nodes from the same graph are treated as
    conflicting and each kept as its own new node.  NOTE: the input
    graphs are mutated in place by the relabelling.

    Returns:
        (merged_graph, centroid_context) where centroid_context maps a
        cluster id to the [new node id, graph index] pairs split off
        because of conflicts.
    """
    # Here, we only merge nodes that don't conflict
    # first rename each graphs nodes in preperation for merge
    # get mapping
    mapping = defaultdict(dict)
    nnodes = 0
    reverse_mapping = defaultdict(list)
    merge_centroids = {}
    centroid_context = defaultdict(list)
    for c, cluster in enumerate(clusters):
        c = str(c)
        nnodes += 1
        graph_clust_count = Counter()
        for n in cluster:
            graph_clust_count[n[0]] += 1
        non_conflicting_nodes = [
            n for n in cluster if graph_clust_count[n[0]] < 2
        ]
        conflicting_nodes = [n for n in cluster if graph_clust_count[n[0]] > 1]
        for n in non_conflicting_nodes:
            # all non-conflicting members of the cluster share one new id
            mapping[n[0]][n[1]] = nnodes
            merge_centroids[nnodes] = c
            reverse_mapping[nnodes].append((n[0], nnodes))
        for n in conflicting_nodes:
            # conflicting members each get a fresh id and are recorded
            # in centroid_context for later paralog collapsing
            nnodes += 1
            mapping[n[0]][n[1]] = nnodes
            merge_centroids[nnodes] = c
            centroid_context[c].append([nnodes, n[0]])
            reverse_mapping[nnodes].append((n[0], nnodes))
    # rename
    for i, G in enumerate(graphs):
        nx.relabel_nodes(G, mapping[i], copy=False)
    # merge graphs
    merged_G = nx.compose_all(graphs)
    # fix up node attributes
    for node in merged_G.nodes():
        size = 0
        members = set()
        lengths = []
        centroid = []
        seqIDs = set()
        protein = []
        dna = []
        annotation = []
        description = []
        paralog = False
        hasEnd = False
        mergedDNA = False
        for prev in reverse_mapping[node]:
            size += graphs[prev[0]].nodes[prev[1]]['size']
            members |= set([
                str(prev[0]) + "_" + str(m)
                for m in make_list(graphs[prev[0]].nodes[prev[1]]['members'])
            ])
            lengths += make_list(graphs[prev[0]].nodes[prev[1]]['lengths'])
            centroid += [
                str(prev[0]) + "_" + str(m) for m in make_list(graphs[
                    prev[0]].nodes[prev[1]]['centroid'].split(";"))
            ]
            seqIDs |= set([
                str(prev[0]) + "_" + d
                for d in make_list(graphs[prev[0]].nodes[prev[1]]['seqIDs'])
            ])
            protein += make_list(
                graphs[prev[0]].nodes[prev[1]]['protein'].split(";"))
            dna += make_list(graphs[prev[0]].nodes[prev[1]]['dna'].split(";"))
            annotation += make_list(
                graphs[prev[0]].nodes[prev[1]]['annotation'])
            description += make_list(
                graphs[prev[0]].nodes[prev[1]]['description'])
            paralog = (paralog or graphs[prev[0]].nodes[prev[1]]['paralog'])
            # NOTE(review): the two lines below OR against `paralog`, which
            # looks like a copy-paste slip; expected `hasEnd or ...` and
            # `mergedDNA or ...` -- confirm before changing
            hasEnd = (paralog or graphs[prev[0]].nodes[prev[1]]['hasEnd'])
            mergedDNA = (paralog
                         or graphs[prev[0]].nodes[prev[1]]['mergedDNA'])
        merged_G.nodes[node]['size'] = size
        merged_G.nodes[node]['members'] = set(members)
        merged_G.nodes[node]['lengths'] = lengths
        merged_G.nodes[node]['prevCentroids'] = str(
            merge_centroids[node])  #";".join(centroid)
        merged_G.nodes[node]['seqIDs'] = set(seqIDs)
        merged_G.nodes[node]['hasEnd'] = hasEnd
        merged_G.nodes[node]['dna'] = [max(dna, key=len)]
        merged_G.nodes[node]['protein'] = [max(protein, key=len)]
        merged_G.nodes[node]['annotation'] = ";".join(annotation)
        merged_G.nodes[node]['description'] = ";".join(description)
        merged_G.nodes[node]['paralog'] = paralog
        merged_G.nodes[node]['mergedDNA'] = mergedDNA
        merged_G.nodes[node]['centroid'] = [str(merge_centroids[node])]
        # fix longcentroid
        if len(merged_G.nodes[node]['centroid']) != len(
                merged_G.nodes[node]['protein']):
            print(merged_G.nodes[node]['protein'])
            print(merged_G.nodes[node]['centroid'])
            raise RuntimeError("protein/centroid count mismatch!")
    for node in merged_G.nodes():
        merged_G.nodes[node]['longCentroidID'] = max([
            (len(s), sid) for s, sid in zip(merged_G.nodes[node]['protein'],
                                            merged_G.nodes[node]['centroid'])
        ])
        merged_G.nodes[node]['maxLenId'] = max([
            (len(s), index)
            for s, index in zip(merged_G.nodes[node]['dna'],
                                range(len(merged_G.nodes[node]['dna'])))
        ])[1]
    # fix up edge attributes
    for edge in merged_G.edges():
        merged_G[edge[0]][edge[1]]['weight'] = 0
        merged_G[edge[0]][edge[1]]['members'] = set()
        for prev1 in reverse_mapping[edge[0]]:
            for prev2 in reverse_mapping[edge[1]]:
                if prev1[0] == prev2[0]:  #same graph
                    if graphs[prev1[0]].has_edge(prev1[1], prev2[1]):
                        merged_G[edge[0]][edge[1]]['weight'] += 1
                        merged_G[edge[0]][edge[1]]['members'] |= set([
                            str(prev1[0]) + "_" + str(m) for m in graphs[
                                prev1[0]][prev1[1]][prev2[1]]['members']
                        ])
    return merged_G, centroid_context
def get_options():
    """Build and parse the command-line arguments for panaroo-merge.

    Returns:
        argparse.Namespace with the parsed options.

    (The redundant function-local `import argparse` was removed; the
    module already imports argparse at the top level.)
    """
    description = 'Merge independent runs of Panaroo'
    parser = argparse.ArgumentParser(description=description,
                                     prog='panaroo_merge_graphs')
    io_opts = parser.add_argument_group('Input/output')
    io_opts.add_argument(
        "-d",
        "--directories",
        dest="directories",
        required=True,
        help="Location of seperate Panaroo output directories",
        nargs='+')
    io_opts.add_argument("-o",
                         "--out_dir",
                         dest="output_dir",
                         required=True,
                         help="location of a new output directory",
                         type=lambda x: is_valid_folder(parser, x))
    matching = parser.add_argument_group('Matching')
    matching.add_argument("-c",
                          "--threshold",
                          dest="id",
                          help="sequence identity threshold (default=0.95)",
                          default=0.95,
                          type=float)
    matching.add_argument(
        "-f",
        "--family_threshold",
        dest="family_threshold",
        help="protein family sequence identity threshold (default=0.7)",
        default=0.7,
        type=float)
    matching.add_argument("--len_dif_percent",
                          dest="len_dif_percent",
                          help="length difference cutoff (default=0.95)",
                          default=0.95,
                          type=float)
    parser.add_argument(
        "--min_edge_support_sv",
        dest="min_edge_support_sv",
        help=(
            "minimum edge support required to call structural variants" +
            " in the presence/absence sv file (default=max(2, 0.01*n_samples))"
        ),
        type=int)
    # Other options
    parser.add_argument("-t",
                        "--threads",
                        dest="n_cpu",
                        help="number of threads to use (default=1)",
                        type=int,
                        default=1)
    parser.add_argument("--verbose",
                        dest="verbose",
                        help="print additional output",
                        action='store_true',
                        default=False)
    parser.add_argument('--version',
                        action='version',
                        version='%(prog)s ' + __version__)
    args = parser.parse_args()
    return (args)
def main():
    """Entry point: merge several independent Panaroo runs into one output.

    Loads each run's final_graph.gml, clusters node centroids across runs,
    merges the graphs, collapses paralogs and gene families, then writes
    the standard Panaroo outputs (gene presence/absence table, pan-genome
    reference fasta, structural-variant matrix, merged GML) to the output
    directory.
    """
    args = get_options()
    # make sure trailing forward slash is present
    args.output_dir = os.path.join(args.output_dir, "")
    args.directories = [os.path.join(d, "") for d in args.directories]
    # Create temporary directory
    temp_dir = os.path.join(tempfile.mkdtemp(dir=args.output_dir), "")
    print(
        "Merging graphs is still under active development and may change frequently!"
    )
    # Load graphs
    print("Loading graphs...")
    graphs = load_graphs([d + "final_graph.gml" for d in args.directories],
                         n_cpu=args.n_cpu)
    # cluster centroids
    print("Clustering centroids...")
    clusters = cluster_centroids(graphs=graphs,
                                 outdir=temp_dir,
                                 len_dif_percent=args.len_dif_percent,
                                 identity_threshold=args.id,
                                 n_cpu=args.n_cpu)
    # perform initial merge
    print("Performing inital merge...")
    G, centroid_contexts = simple_merge_graphs(graphs, clusters)
    print("Number of nodes in merged graph: ", G.number_of_nodes())
    # collapse gene families/paralogs at successively lower thresholds
    print("Collapsing paralogs...")
    G = collapse_paralogs(G, centroid_contexts)
    print("Collapsing at DNA...")
    G = collapse_families(G,
                          outdir=temp_dir,
                          dna_error_threshold=0.98,
                          correct_mistranslations=True,
                          n_cpu=args.n_cpu,
                          quiet=(not args.verbose))[0]
    print("Collapsing at families...")
    G = collapse_families(G,
                          outdir=temp_dir,
                          family_threshold=args.family_threshold,
                          correct_mistranslations=False,
                          n_cpu=args.n_cpu,
                          quiet=(not args.verbose))[0]
    print("Number of nodes in merged graph: ", G.number_of_nodes())
    # Generate output
    print("Generating output...")
    # write out roary like gene_presence_absence.csv
    # members are keyed "<graph index>_<isolate index>" across sub-graphs
    mems_to_isolates = {}
    for i, sub_G in enumerate(graphs):
        for j, iso in enumerate(sub_G.graph['isolateNames']):
            mems_to_isolates[str(i) + "_" + str(j)] = iso
    n_samples = len(mems_to_isolates)
    args.min_edge_support_sv = max(2, math.ceil(0.01 * n_samples))
    # get original annotation IDs, lengths and whether or
    # not an internal stop codon is present
    orig_ids = {}
    ids_len_stop = {}
    for i, d in enumerate(args.directories):
        with open(d + "gene_data.csv", 'r') as infile:
            next(infile)
            for line in infile:
                line = line.split(",")
                orig_ids[str(i) + "_" + line[2]] = line[3]
                ids_len_stop[str(i) + "_" + line[2]] = (len(line[4]),
                                                        "*" in line[4][1:-3])
    G = generate_roary_gene_presence_absence(G,
                                             mems_to_isolates=mems_to_isolates,
                                             orig_ids=orig_ids,
                                             ids_len_stop=ids_len_stop,
                                             output_dir=args.output_dir)
    # write pan genome reference fasta file
    generate_pan_genome_reference(G,
                                  output_dir=args.output_dir,
                                  split_paralogs=False)
    # write out common structural differences in a matrix format
    generate_common_struct_presence_absence(
        G,
        output_dir=args.output_dir,
        mems_to_isolates=mems_to_isolates,
        min_variant_support=args.min_edge_support_sv)
    # add helpful attributes and write out graph in GML format
    for node in G.nodes():
        G.nodes[node]['size'] = len(G.nodes[node]['members'])
        G.nodes[node]['genomeIDs'] = ";".join(G.nodes[node]['members'])
        G.nodes[node]['members'] = list(G.nodes[node]['members'])
        G.nodes[node]['centroid'] = G.nodes[node]['prevCentroids']
        del G.nodes[node]['prevCentroids']
        G.nodes[node]['geneIDs'] = ";".join(G.nodes[node]['seqIDs'])
        G.nodes[node]['seqIDs'] = list(G.nodes[node]['seqIDs'])
        G.nodes[node]['degrees'] = G.degree[node]
        sub_graphs = list(
            set([m.split("_")[0] for m in G.nodes[node]['members']]))
        G.nodes[node]['subGraphs'] = ";".join(conv_list(sub_graphs))
    for edge in G.edges():
        G.edges[edge[0],
                edge[1]]['genomeIDs'] = ";".join(G.edges[edge[0],
                                                         edge[1]]['members'])
        G.edges[edge[0],
                edge[1]]['members'] = list(G.edges[edge[0],
                                                   edge[1]]['members'])
        sub_graphs = list(
            set([
                m.split("_")[0] for m in G.edges[edge[0], edge[1]]['members']
            ]))
        G.edges[edge[0],
                edge[1]]['subGraphs'] = ";".join(conv_list(sub_graphs))
    nx.write_gml(G, args.output_dir + "merged_final_graph.gml")
    # remove temporary directory
    shutil.rmtree(temp_dir)
    return
| 36.036952 | 85 | 0.537683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,922 | 0.18726 |
983a2aeb9ad32099cd6f39c374a89a7d58015b41 | 1,535 | py | Python | campbellsoup/utilities_test.py | NBOCampbellToets/CampbellSoup | 45478c3e5e0362d01af8898078c6621f7b11c191 | [
"PostgreSQL"
] | null | null | null | campbellsoup/utilities_test.py | NBOCampbellToets/CampbellSoup | 45478c3e5e0362d01af8898078c6621f7b11c191 | [
"PostgreSQL"
] | 45 | 2016-11-21T16:01:44.000Z | 2018-05-25T13:35:01.000Z | campbellsoup/utilities_test.py | NBOCampbellToets/CampbellSoup | 45478c3e5e0362d01af8898078c6621f7b11c191 | [
"PostgreSQL"
] | 1 | 2019-02-27T08:04:55.000Z | 2019-02-27T08:04:55.000Z | # (c) 2016 Julian Gonggrijp
from .utilities import *
def test_un_camelcase():
assert un_camelcase('CampbellSoupX') == 'campbell_soup_x'
assert un_camelcase('NBOCampbellToets') == 'n_b_o_campbell_toets'
def test_append_to():
__all__ = []
class Example(object):
pass
@append_to(__all__)
class Illustration(object):
pass
@append_to(__all__)
def foo():
pass
def bar():
pass
assert __all__ == ['Illustration', 'foo']
def test_maybe():
tester = {
'banana': [
0,
'x',
[1, 2, 3],
{
'deep_banana': {'value': 'deeper_banana'},
}
],
'orange': [],
}
assert len(maybe(tester, 'banana')) == 4
assert maybe(tester, 'banana', 0) == 0
assert maybe(tester, 'banana', 1) == 'x'
assert maybe(tester, 'banana', 1, 0) == 'x'
assert maybe(tester, 'banana', 1, 1) == None
assert maybe(tester, 'banana', 2) == [1, 2, 3]
assert maybe(tester, 'banana', 2, 2) == 3
assert maybe(tester, 'banana', 2, 3) == None
assert maybe(tester, 'banana', 3, 'deep_banana', 'value') == 'deeper_banana'
assert maybe(tester, 'banana', 3, 'deep_banana', 'other') == None
assert maybe(tester, 'banana', 4) == None
assert maybe(tester, 'orange') == []
assert maybe(tester, 'orange', 3) == None
assert maybe(tester, 'orange', 3, fallback='') == ''
assert maybe(tester, 'kiwi') == None
assert maybe(tester, 'kiwi', fallback=10) == 10
| 28.425926 | 80 | 0.558306 | 75 | 0.04886 | 0 | 0 | 111 | 0.072313 | 0 | 0 | 361 | 0.235179 |
983a309c76f611c57739b0e01e641a1dd46468f2 | 1,242 | py | Python | change-njk.py | js-dos/repository | f4e08f223d88cd01140b815c34a274169ed4f954 | [
"MIT"
] | 12 | 2021-12-01T01:35:34.000Z | 2022-03-30T22:51:23.000Z | change-njk.py | js-dos/repository | f4e08f223d88cd01140b815c34a274169ed4f954 | [
"MIT"
] | 5 | 2021-10-12T20:00:44.000Z | 2022-02-15T13:45:47.000Z | change-njk.py | js-dos/repository | f4e08f223d88cd01140b815c34a274169ed4f954 | [
"MIT"
] | 1 | 2022-02-23T19:25:48.000Z | 2022-02-23T19:25:48.000Z | import os
import re
import yaml
for root, dirs, files in os.walk("."):
for file in files:
njk = os.path.join(root, file)
if njk.endswith(".njk"):
with open(njk, "r") as file:
lines = file.read().split("\n")
if not(lines[0].startswith("---")):
continue
end = 1
while not(lines[end].startswith("---")):
end += 1
meta = yaml.safe_load("\n".join(lines[1:end]))
field = "ogDescription"
if not(field in meta) or not("shortTitle" in meta):
continue
meta[field] = (meta["shortTitle"] +
" is a famous and most played DOS game that now is available to play in browser. With virtual" +
" mobile controls you also can play in " + meta["shortTitle"] +
" on mobile. On DOS.Zone " + meta["shortTitle"] + " available to play for free without registration.")
meta = yaml.dump(meta, default_flow_style=False, allow_unicode=True)
lines = [lines[0]] + meta.split("\n") + lines[end:]
with open(njk, "w") as file:
file.write("\n".join(lines))
| 36.529412 | 118 | 0.506441 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 315 | 0.253623 |
983c57ddcc0f5ba60bf439a4f0ac1aa43adc5576 | 557 | py | Python | core/migrations/0003_auto_20180730_1452.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | [
"MIT"
] | 7 | 2017-09-14T18:52:58.000Z | 2020-05-18T21:01:20.000Z | core/migrations/0003_auto_20180730_1452.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | [
"MIT"
] | 151 | 2017-09-14T18:46:02.000Z | 2022-02-10T09:18:44.000Z | core/migrations/0003_auto_20180730_1452.py | CobwebOrg/cobweb-django | 14241326860620dbaa64f7eefc6d4b393f80d23c | [
"MIT"
] | 1 | 2017-10-29T19:37:29.000Z | 2017-10-29T19:37:29.000Z | # Generated by Django 2.0.7 on 2018-07-30 21:52
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0002_auto_20180730_1451'),
]
operations = [
migrations.AddField(
model_name='resourcescan',
name='redirect_url',
field=models.URLField(blank=True, null=True),
),
migrations.AlterUniqueTogether(
name='resourcescan',
unique_together={('resource', 'is_active', 'redirect_url')},
),
]
| 24.217391 | 72 | 0.594255 | 464 | 0.833034 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.278276 |
983db1ab4d2da3e4b5bba68391b626a401dccec6 | 231 | py | Python | moto/servicediscovery/__init__.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | moto/servicediscovery/__init__.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | 1 | 2022-02-19T02:10:45.000Z | 2022-02-19T02:15:52.000Z | moto/servicediscovery/__init__.py | symroe/moto | 4e106995af6f2820273528fca8a4e9ee288690a5 | [
"Apache-2.0"
] | null | null | null | """servicediscovery module initialization; sets value for base decorator."""
from .models import servicediscovery_backends
from ..core.models import base_decorator
mock_servicediscovery = base_decorator(servicediscovery_backends)
| 38.5 | 76 | 0.848485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.329004 |
983e1c82111a5197c93a2fe5e845fe88a4784b12 | 4,954 | py | Python | csr/csr.py | AlexJanse/python_csr2transmart | c01a76dfa6ecfa4248b274144092ccc6c31aab5a | [
"MIT"
] | null | null | null | csr/csr.py | AlexJanse/python_csr2transmart | c01a76dfa6ecfa4248b274144092ccc6c31aab5a | [
"MIT"
] | null | null | null | csr/csr.py | AlexJanse/python_csr2transmart | c01a76dfa6ecfa4248b274144092ccc6c31aab5a | [
"MIT"
] | null | null | null | from datetime import date
from typing import Sequence, Optional, Union, Dict, List, Any
from pydantic import BaseModel, validator, Field
from csr.entity_validation import validate_entity_data
from csr.exceptions import DataException
class Individual(BaseModel):
    """
    Individual entity: one person in the Central Subject Registry.
    """
    individual_id: str = Field(..., min_length=1, identity=True)
    taxonomy: Optional[str]
    gender: Optional[str]
    birth_date: Optional[date]
    death_date: Optional[date]
    # ic_* fields: presumably informed-consent metadata -- confirm with the data dictionary
    ic_type: Optional[str]
    ic_version: Optional[float]
    ic_given_date: Optional[date]
    ic_withdrawn_date: Optional[date]
    report_her_susc: Optional[str]
    report_inc_findings: Optional[str]
    diagnosis_count: Optional[int]
    age_first_diagnosis: Optional[int]
class Diagnosis(BaseModel):
    """
    Diagnosis entity: a tumor diagnosis linked to an Individual.
    """
    diagnosis_id: str = Field(..., min_length=1, identity=True)
    # owning individual (referential integrity checked by entity validation)
    individual_id: str = Field(..., min_length=1, references='Individual')
    tumor_type: Optional[str]
    topography: Optional[str]
    treatment_protocol: Optional[str]
    tumor_stage: Optional[str]
    diagnosis_date: Optional[date]
    diagnosis_center: Optional[str]
class Biosource(BaseModel):
    """
    Biosource entity: a sample taken from an individual, optionally linked
    to a diagnosis or derived from another biosource.
    """
    biosource_id: str = Field(..., min_length=1, identity=True)
    biosource_dedicated: Optional[str]
    individual_id: str = Field(..., min_length=1, references='Individual')
    diagnosis_id: Optional[str] = Field(None, min_length=1, references='Diagnosis')
    # parent biosource this one was derived from, if any
    src_biosource_id: Optional[str] = Field(None, min_length=1, references='Biosource')
    tissue: Optional[str]
    biosource_date: Optional[date]
    disease_status: Optional[str]
    # tumor cell percentage of the sample -- presumably 0-100; confirm
    tumor_percentage: Optional[int]
    @validator('src_biosource_id')
    def check_self_reference(cls, src_biosource_id, values):
        # a biosource must not claim to be derived from itself
        if src_biosource_id == values['biosource_id']:
            raise DataException(f'Biosource cannot be derived from itself')
        return src_biosource_id
class Biomaterial(BaseModel):
    """
    Biomaterial entity: material (e.g. DNA/RNA) extracted from a biosource,
    optionally derived from another biomaterial.
    """
    biomaterial_id: str = Field(..., min_length=1, identity=True)
    src_biosource_id: str = Field(..., min_length=1, references='Biosource')
    # parent biomaterial this one was derived from, if any
    src_biomaterial_id: Optional[str] = Field(None, min_length=1, references='Biomaterial')
    biomaterial_date: Optional[date]
    # molecule type, e.g. 'DNA' or 'RNA'
    type: Optional[str]
    library_strategy: Optional[List[str]]
    analysis_type: Optional[List[str]]
    @validator('src_biomaterial_id')
    def check_self_reference(cls, src_biomaterial_id, values):
        # a biomaterial must not claim to be derived from itself
        if src_biomaterial_id == values['biomaterial_id']:
            raise DataException('Biomaterial cannot be derived from itself')
        return src_biomaterial_id
    @validator('library_strategy')
    def validate_molecule_type_agrees_with_library_strategy(cls, library_strategy, values):
        """Reject library strategies incompatible with the molecule type."""
        # table replaces the original repeated if-chain; the non-idiomatic
        # __contains__ calls are replaced with `in`.  Messages are unchanged.
        disallowed_by_type = {
            'DNA': ['RNA-Seq'],
            'RNA': ['WXS', 'WGS', 'DNA-meth_array'],
        }
        if 'type' in values and library_strategy is not None:
            molecule_type = values['type']
            for strategy in disallowed_by_type.get(molecule_type, []):
                if strategy in library_strategy:
                    raise DataException(
                        f'Not allowed {strategy} library strategy '
                        f'for molecule type: {molecule_type}')
        return library_strategy
class Study(BaseModel):
    """
    Study: a research study individuals can be enrolled in.
    """
    study_id: str = Field(..., min_length=1, identity=True)
    acronym: Optional[str]
    title: Optional[str]
    datadictionary: Optional[str]
class IndividualStudy(BaseModel):
    """
    Study to individual mapping (one enrolment of an individual in a study).
    """
    study_id_individual_study_id: str = Field(..., min_length=1, identity=True)
    # the individual's id as used within the study
    individual_study_id: str
    individual_id: str = Field(..., min_length=1, references='Individual')
    study_id: str = Field(..., min_length=1, references='Study')
# union of all entity types that can live in the Central Subject Registry
SubjectEntity = Union[Individual, Diagnosis, Biosource, Biomaterial]
class CentralSubjectRegistry(BaseModel):
    """
    Central subject registry: maps entity type names to sequences of subject
    entities (Individual, Diagnosis, Biosource, Biomaterial).
    """
    entity_data: Dict[str, Sequence[Any]]

    @staticmethod
    def create(entity_data: Dict[str, Sequence[Any]]):
        """Validate the entity data against the subject entity types and wrap it."""
        subject_types = list(SubjectEntity.__args__)
        validate_entity_data(entity_data, subject_types)
        return CentralSubjectRegistry(entity_data=entity_data)
StudyEntity = Union[Study, IndividualStudy]
class StudyRegistry(BaseModel):
    """
    Study registry: maps entity type names to sequences of study entities
    (Study, IndividualStudy).
    """
    entity_data: Dict[str, Sequence[Any]]

    @staticmethod
    def create(entity_data: Dict[str, Sequence[Any]]):
        """Validate the entity data against the study entity types and wrap it."""
        study_types = list(StudyEntity.__args__)
        validate_entity_data(entity_data, study_types)
        return StudyRegistry(entity_data=entity_data)
| 34.402778 | 106 | 0.701655 | 4,578 | 0.924102 | 0 | 0 | 1,869 | 0.377271 | 0 | 0 | 858 | 0.173193 |
983e2d78b82a9887c7a118c07e33b600fb3cff99 | 337 | py | Python | hwtBuildsystem/yosys/config.py | optical-o/hwtBuildsystem | 791c0409fde07e06c443d4dc2eece284804c3609 | [
"MIT"
] | 2 | 2020-02-07T14:24:55.000Z | 2020-06-02T09:29:02.000Z | hwtBuildsystem/yosys/config.py | optical-o/hwtBuildsystem | 791c0409fde07e06c443d4dc2eece284804c3609 | [
"MIT"
] | null | null | null | hwtBuildsystem/yosys/config.py | optical-o/hwtBuildsystem | 791c0409fde07e06c443d4dc2eece284804c3609 | [
"MIT"
] | 1 | 2021-09-27T20:11:03.000Z | 2021-09-27T20:11:03.000Z | from hwtBuildsystem.fileUtils import which
class YosysConfig():
    """Locates the yosys executable for the build system."""
    # Conventional install location on Linux. Currently informational only:
    # the lookup below relies on PATH via which().
    _DEFAULT_LINUX = '/usr/bin/yosys'

    @classmethod
    def getExec(cls):
        """Return the yosys executable name, raising if it is not on PATH.

        :raises Exception: when `yosys` cannot be found via which().
        """
        exe = "yosys"
        if which(exe) is None:
            # Fixed error-message typo: previously read "Can find ...".
            raise Exception('Can not find yosys installation')
        return exe
if __name__ == "__main__":
print(YosysConfig.getExec())
| 19.823529 | 58 | 0.637982 | 229 | 0.679525 | 0 | 0 | 165 | 0.489614 | 0 | 0 | 62 | 0.183976 |
983f4f7294d1c45f6ca7de2184dcf0e247274760 | 5,438 | py | Python | usr/examples/03-Drawing/crazy_drawing.py | SSSnow/MDV3 | 5f21f9bbc04bccc1c060cebd74a4e1781c10aa00 | [
"MIT"
] | 6 | 2017-05-24T06:51:37.000Z | 2020-07-04T16:36:29.000Z | usr/examples/03-Drawing/crazy_drawing.py | Killercotton/OpenMV_OV7670 | c4130052fc6e0f2eed2089222b3b1f2573c9825f | [
"MIT"
] | null | null | null | usr/examples/03-Drawing/crazy_drawing.py | Killercotton/OpenMV_OV7670 | c4130052fc6e0f2eed2089222b3b1f2573c9825f | [
"MIT"
] | 1 | 2019-10-21T11:08:37.000Z | 2019-10-21T11:08:37.000Z | # Crazy Drawing Example
#
# This example shows off your OpenMV Cam's built-in drawing capabilities. This
# example was originally a test but serves as good reference code. Please put
# your IDE into non-JPEG mode to see the best drawing quality.
import pyb, sensor, image, math
sensor.reset()
sensor.set_framesize(sensor.QVGA)
while(True):
# Test Set Pixel
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.set_pixel(x, y, 255)
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.set_pixel(x, y, (255, 255, 255))
# Test Draw Line
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.draw_line([x0, y0, x1, y1])
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x0 = (pyb.rng() % (2*img.width())) - (img.width()//2)
y0 = (pyb.rng() % (2*img.height())) - (img.height()//2)
x1 = (pyb.rng() % (2*img.width())) - (img.width()//2)
y1 = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.draw_line([x0, y0, x1, y1])
# Test Draw Rectangle
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
w = (pyb.rng() % img.width())
h = (pyb.rng() % img.height())
img.draw_rectangle([x, y, w, h])
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
w = (pyb.rng() % img.width())
h = (pyb.rng() % img.height())
img.draw_rectangle([x, y, w, h])
# Test Draw Circle
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
r = (pyb.rng() % (img.width() if (img.width() > img.height()) else img.height()))
img.draw_circle(x, y, r)
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
r = (pyb.rng() % (img.width() if (img.width() > img.height()) else img.height()))
img.draw_circle(x, y, r)
# Test Draw String
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.draw_string(x, y, "Hello\nWorld!")
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.draw_string(x, y, "Hello\nWorld!")
# Test Draw Cross
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.draw_cross(x, y)
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
img.draw_cross(x, y)
# Test Draw Keypoints
sensor.set_pixformat(sensor.GRAYSCALE)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
a = (pyb.rng() % (2*math.pi))
img.draw_keypoints([(x, y, a)])
sensor.set_pixformat(sensor.RGB565)
for i in range(10):
img = sensor.snapshot()
for j in range(100):
x = (pyb.rng() % (2*img.width())) - (img.width()//2)
y = (pyb.rng() % (2*img.height())) - (img.height()//2)
a = (pyb.rng() % (2*math.pi))
img.draw_keypoints([(x, y, a)])
| 39.985294 | 93 | 0.503678 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 398 | 0.073189 |
984030ab211e4f6ba95ea988b5e29eb0ca7a323a | 607 | py | Python | app_python/src/test/test_timegen.py | Mexator/devops | 0f7f4bcd453c9243888c51baa532c0d65e6d25e8 | [
"MIT"
] | 1 | 2021-08-23T11:54:32.000Z | 2021-08-23T11:54:32.000Z | app_python/src/test/test_timegen.py | Mexator/devops | 0f7f4bcd453c9243888c51baa532c0d65e6d25e8 | [
"MIT"
] | null | null | null | app_python/src/test/test_timegen.py | Mexator/devops | 0f7f4bcd453c9243888c51baa532c0d65e6d25e8 | [
"MIT"
] | null | null | null | """This module contains tests for the project. Can be split into several files
later"""
from datetime import datetime
import pytest
import pytz
from src.main.pages.get_clock_page import get_time_str
zone = pytz.timezone("Europe/Moscow")
@pytest.mark.parametrize(
    ('freeze_ts', 'expected_res'),
    [
        # Unix timestamp for 2021-08-22 13:53:57 in Europe/Moscow (the
        # module-level `zone`).
        (1629629637, 'Sunday, 22 August 2021\n13:53:57'),
    ]
)
def test_time_gen(freezer, freeze_ts, expected_res):
    """Freeze the clock and check the rendered time-widget string."""
    # `freezer` is a clock-freezing pytest fixture (presumably
    # pytest-freezegun -- confirm in the test requirements).
    freezer.move_to(datetime.fromtimestamp(freeze_ts))
    result = get_time_str(timezone=zone)
    assert result == expected_res
| 25.291667 | 78 | 0.734761 | 0 | 0 | 0 | 0 | 365 | 0.601318 | 0 | 0 | 205 | 0.337727 |
9840c6e619f369bd516308c15457bc17f344220a | 2,007 | py | Python | marketplaces/cron_report_daily_activity.py | diassor/CollectorCity-Market-Place | 892ad220b8cf1c0fc7433f625213fe61729522b2 | [
"Apache-2.0"
] | 135 | 2015-03-19T13:28:18.000Z | 2022-03-27T06:41:42.000Z | marketplaces/cron_report_daily_activity.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | null | null | null | marketplaces/cron_report_daily_activity.py | dfcoding/CollectorCity-Market-Place | e59acec3d600c049323397b17cae14fdcaaaec07 | [
"Apache-2.0"
] | 83 | 2015-01-30T01:00:15.000Z | 2022-03-08T17:25:10.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
import datetime
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
from django.core.management import setup_environ
from django.core.mail import send_mail
#from django.db import transaction
import settings
setup_environ(settings)
"""
Daily Activity (Sign Up / Cancel)
Total Customers
Total Sign Ups This Month
Total Sign Ups This Today
Total Cancelations This Month
Total Cancelations This Today
"""
def report_daily_activity():
    """Build and email the daily activity report (sign-ups / cancellations).

    Renders the txt/html report templates with today's activity data and
    sends a multipart mail. On failure the exception is logged (with
    traceback) and the staff addresses in ``settings.STAFF`` are notified.
    """
    from django.core.mail import EmailMultiAlternatives, EmailMessage
    from django.template import Context, loader
    from reports.views import get_daily_activity_data

    day = datetime.datetime.now()

    try:
        t_txt = loader.get_template("admin/mail/daily_activity_report.txt")
        t_html = loader.get_template("admin/mail/daily_activity_report.html")
        c = get_daily_activity_data(day)

        subject, from_email, to = 'Daily Activity Report', "no-reply@greatcoins.com", "admin@greatcoins.com"
        text_content = t_txt.render(Context(c))
        html_content = t_html.render(Context(c))
        msg = EmailMultiAlternatives(subject, text_content, from_email, [to])
        msg.attach_alternative(html_content, "text/html")
        msg.send()
    except Exception as e:  # `as` syntax works on Python 2.6+ and 3.x
        # logging.exception records the stack trace, unlike logging.info.
        logging.exception(e)
        mail = EmailMessage(subject='Error when trying to generate Daily Activity Report',
                            body=str(e),  # EmailMessage expects a string body
                            from_email=settings.EMAIL_FROM,
                            to=[addr for (_name, addr) in settings.STAFF],
                            headers={'X-SMTPAPI': '{\"category\": \"Error\"}'})
        mail.send(fail_silently=True)
if __name__ == "__main__":
report_daily_activity() | 35.210526 | 161 | 0.655705 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 717 | 0.35725 |
98420fbde9020acf9d2baa635fdb24d604a951dc | 3,882 | py | Python | utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py | snoopdave/incubator-usergrid | 104f64eb4f318221f0d11bf43baad0e7c630cafe | [
"Apache-2.0"
] | 788 | 2015-08-21T16:46:57.000Z | 2022-03-16T01:57:44.000Z | utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py | snoopdave/incubator-usergrid | 104f64eb4f318221f0d11bf43baad0e7c630cafe | [
"Apache-2.0"
] | 101 | 2015-08-23T04:58:13.000Z | 2019-11-13T07:02:57.000Z | utils/usergrid-util-python/usergrid_tools/general/queue_monitor.py | snoopdave/incubator-usergrid | 104f64eb4f318221f0d11bf43baad0e7c630cafe | [
"Apache-2.0"
] | 342 | 2015-08-22T06:14:20.000Z | 2022-03-15T01:20:39.000Z | # */
# * Licensed to the Apache Software Foundation (ASF) under one
# * or more contributor license agreements. See the NOTICE file
# * distributed with this work for additional information
# * regarding copyright ownership. The ASF licenses this file
# * to you under the Apache License, Version 2.0 (the
# * "License"); you may not use this file except in compliance
# * with the License. You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing,
# * software distributed under the License is distributed on an
# * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# * KIND, either express or implied. See the License for the
# * specific language governing permissions and limitations
# * under the License.
# */
import argparse
import json
import datetime
import os
import time
import sys
import boto
from boto import sqs
### This monitors an SQS queue and measures the delta message count between polling intervals to infer the amount of time
### remaining to fully drain the queue
__author__ = 'Jeff.West@yahoo.com'
def total_seconds(td):
    """Return the total duration of timedelta *td* in fractional seconds.

    Delegates to ``timedelta.total_seconds()`` (available since Python 2.7),
    which computes exactly what the previous hand-rolled formula did. Kept
    as a helper so existing callers are unchanged.
    """
    return td.total_seconds()
def total_milliseconds(td):
    """Return the duration of timedelta *td* in whole milliseconds.

    Bug fix: the previous implementation ignored ``td.days``, so any delta
    of a day or more was reported wrongly.
    """
    return int(td.total_seconds() * 1000)
def get_time_remaining(count, rate):
    """Format count/rate (seconds) as H:MM:SS; returns 'NaN' for a zero rate."""
    if rate == 0:
        return 'NaN'
    total = count * 1.0 / rate
    hours, remainder = divmod(total, 3600)
    minutes, seconds = divmod(remainder, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
def parse_args():
    """Parse the command line; returns the options as a plain dict."""
    parser = argparse.ArgumentParser(description='Usergrid Loader - Queue Monitor')
    # NOTE(review): the help text says "queue to load into", but this option
    # is actually the path of the JSON config file with the SQS settings.
    parser.add_argument('-c', '--config',
                        help='The queue to load into',
                        type=str,
                        default='%s/.usergrid/queue_monitor.json' % os.getenv("HOME"))
    parser.add_argument('-q', '--queue_name',
                        help='The queue name to send messages to. If not specified the filename is used',
                        default='entities',
                        type=str)
    my_args = parser.parse_args(sys.argv[1:])
    print str(my_args)  # Python 2 print statement -- this script is Python 2
    return vars(my_args)
def main():
    """Poll an SQS queue and report its drain rate and estimated time to empty."""
    args = parse_args()
    queue_name = args.get('queue_name')
    print 'queue_name=%s' % queue_name
    start_time = datetime.datetime.utcnow()
    first_start_time = start_time
    print "first start: %s" % first_start_time
    # The config file holds the boto connection parameters under the 'sqs' key.
    with open(args.get('config'), 'r') as f:
        config = json.load(f)
    sqs_config = config.get('sqs')
    last_time = datetime.datetime.utcnow()
    sqs_conn = boto.sqs.connect_to_region(**sqs_config)
    queue = sqs_conn.get_queue(queue_name)
    last_size = queue.count()
    first_size = last_size
    print 'Starting Size: %s' % last_size
    sleep = 10  # polling interval in seconds
    time.sleep(sleep)
    # Running sum/count of per-interval rates, for the displayed average.
    rate_sum = 0
    rate_count = 0
    while True:
        size = queue.count()
        time_stop = datetime.datetime.utcnow()
        # Interval rate (since last poll) and aggregate rate (since start).
        time_delta = total_seconds(time_stop - last_time)
        agg_time_delta = total_seconds(time_stop - first_start_time)
        agg_size_delta = first_size - size
        agg_messages_rate = 1.0 * agg_size_delta / agg_time_delta
        size_delta = last_size - size
        messages_rate = 1.0 * size_delta / time_delta
        rate_sum += messages_rate
        rate_count += 1
        # Remaining time is estimated from the aggregate drain rate.
        print '%s | %s | Size: %s | Processed: %s | Last: %s | Avg: %s | Count: %s | agg rate: %s | Remaining: %s' % (
            datetime.datetime.utcnow(),
            queue_name,
            size, size_delta, round(messages_rate, 2),
            round(rate_sum / rate_count, 2), rate_count,
            round(agg_messages_rate, 2),
            get_time_remaining(size, agg_messages_rate))
        last_size = size
        last_time = time_stop
        time.sleep(sleep)
if __name__ == '__main__':
main()
| 27.928058 | 121 | 0.638073 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,414 | 0.364245 |
98435af6057b09cfff7b056786f631263c651d95 | 35 | py | Python | core/__init__.py | johndekroon/RPTR | db833aeb2698e598e098b59e43a310293d8cf5b8 | [
"MIT"
] | 2 | 2018-01-19T09:27:03.000Z | 2021-12-10T09:26:21.000Z | core/__init__.py | johndekroon/RPTR | db833aeb2698e598e098b59e43a310293d8cf5b8 | [
"MIT"
] | null | null | null | core/__init__.py | johndekroon/RPTR | db833aeb2698e598e098b59e43a310293d8cf5b8 | [
"MIT"
] | 2 | 2018-04-11T21:52:14.000Z | 2018-08-17T23:38:22.000Z | """
Init file for the RPTR Core
""" | 11.666667 | 27 | 0.628571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 35 | 1 |
9843d82030095d57d6ca2780e6e91c3791600765 | 433 | py | Python | src/hexdump2/__init__.py | HGrooms/hexdump2 | bd9c5de0ac7cd82d0a77c0b5561e5cb12eb7c2b3 | [
"MIT"
] | null | null | null | src/hexdump2/__init__.py | HGrooms/hexdump2 | bd9c5de0ac7cd82d0a77c0b5561e5cb12eb7c2b3 | [
"MIT"
] | null | null | null | src/hexdump2/__init__.py | HGrooms/hexdump2 | bd9c5de0ac7cd82d0a77c0b5561e5cb12eb7c2b3 | [
"MIT"
] | null | null | null | """
mirrors functionality of hexdump(1) and API interface of Python hexdump package.
Usage:
1. Within Python:
from hexdump2 import hexdump, color_always
# Enable or disable color all the time
color_always()
hexdump(bytes-like data)
2. From commandline, run the console scripts hexdump2 or hd2
$ hd2 -h
"""
# Import for everyone to use
from .hexdump2 import hexdump, hd, color_always
__all__ = ["hexdump", "hd", "color_always"]
| 20.619048 | 80 | 0.752887 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 364 | 0.840647 |
9844f78c527b440e7862858aa1908a9848373abe | 5,076 | py | Python | scripts/to-geojson.py | openstates/openstates-geo | ef9f81cf43dbae1c492924566f88e961ea7e8e9b | [
"MIT"
] | 9 | 2020-05-01T18:42:18.000Z | 2022-01-31T03:41:17.000Z | scripts/to-geojson.py | openstates/openstates-geo | ef9f81cf43dbae1c492924566f88e961ea7e8e9b | [
"MIT"
] | 4 | 2020-04-21T19:36:19.000Z | 2021-08-03T18:57:16.000Z | scripts/to-geojson.py | openstates/openstates-geo | ef9f81cf43dbae1c492924566f88e961ea7e8e9b | [
"MIT"
] | 4 | 2020-05-25T22:03:01.000Z | 2021-08-11T20:57:41.000Z | #!/usr/bin/env python3
import os
import sys
import csv
import json
import glob
import subprocess
import us
import openstates_metadata as metadata
OCD_FIXES = {
"ocd-division/country:us/state:vt/sldu:grand_isle-chittenden": "ocd-division/country:us/state:vt/sldu:grand_isle"
}
SKIPPED_GEOIDS = {
"cd-6098": "American Samoa",
"cd-6998": "Northern Mariana Islands",
"cd-6698": "Guam",
"cd-7898": "Virgin Islands",
}
def get_ocdid_records():
    """Load the OCD division records from the sldu/sldl/cd CSV files.

    Returns:
        list of dict: one dict per CSV row, concatenated across all three
        files (each has at least ``id`` and ``census_geoid`` columns, as
        used by merge_ids()).
    """
    paths = [
        "./data/ocdids/us_sldu.csv",
        "./data/ocdids/us_sldl.csv",
        "./data/ocdids/us_cd.csv",
    ]
    all_divs = []
    for path in paths:
        with open(path, "r") as div_file:
            # DictReader is iterable; extend() avoids building a throwaway
            # intermediate list (the original used `+= [row for row in ...]`).
            all_divs.extend(csv.DictReader(div_file))
    return all_divs
ocd_ids = get_ocdid_records()
MTFCC_MAPPING = {
"G5200": "cd",
"G5210": "sldu",
"G5220": "sldl",
}
def merge_ids(geojson_path):
    """Rewrite each feature's properties to Open States' canonical form.

    For every district feature in the GeoJSON file, look up its OCD
    division ID and replace the Census properties with
    {ocdid, type, state, name}, then write the result to data/geojson/.
    """
    with open(geojson_path, "r") as geojson_file:
        geojson = json.load(geojson_file)

    for feature in geojson["features"]:
        district_type = MTFCC_MAPPING[feature["properties"]["MTFCC"]]

        # Identify the OCD ID by making a lookup against the CSV files.
        # The OCD ID is the canonical identifier of an area on
        # the Open States platform.
        geoid = "{}-{}".format(district_type, feature["properties"]["GEOID"])
        if geoid in SKIPPED_GEOIDS:
            continue
        for row in ocd_ids:
            if row["census_geoid"] == geoid:
                ocd_id = row["id"]
                break
        else:
            print(feature["properties"])
            raise AssertionError("Could not find OCD ID for GEOID {}".format(geoid))

        # Although OCD IDs contain the state postal code, parsing
        # an ID to determine structured data is bad practice,
        # so add a standalone state postal abbreviation property too.
        state = us.states.lookup(feature["properties"]["STATEFP"]).abbr.lower()
        state_meta = metadata.lookup(abbr=state)

        if ocd_id in OCD_FIXES:
            ocd_id = OCD_FIXES[ocd_id]

        if district_type == "cd":
            cd_num = feature["properties"]["CD116FP"]
            if cd_num in ("00", "98"):  # at-large districts
                cd_num = "AL"
            district_name = f"{state.upper()}-{cd_num}"
        else:
            district = state_meta.lookup_district(ocd_id)
            # Bug fix: validate the lookup result *before* dereferencing it.
            # Previously `district.name` ran first, so a missing district
            # raised AttributeError instead of the intended ValueError.
            if not district:
                raise ValueError(f"no {ocd_id} {district_type}")
            district_name = district.name

        feature["properties"] = {
            "ocdid": ocd_id,
            "type": district_type,
            "state": state,
            "name": district_name,
        }

    # NOTE(review): `district_type` and `state` here carry the values of the
    # *last* processed feature; this assumes all features in one file share
    # the same type/state, which holds for the per-state TIGER files.
    if district_type == "cd":
        output_filename = f"data/geojson/us-{district_type}.geojson"
    else:
        output_filename = f"data/geojson/{state}-{district_type}.geojson"
    print(f"{geojson_path} => {output_filename}")
    with open(output_filename, "w") as geojson_file:
        json.dump(geojson, geojson_file)
def process_va_lower(file):
    """Convert the VA lower-house remedial shapefile and tag its features.

    Virginia's 2019 remedial house map has its own property schema
    (``District_N``), so it bypasses the generic merge_ids() path.
    """
    newfilename = file.replace(".shp", ".geojson")
    # Reproject to WGS84 and convert SHP -> GeoJSON with GDAL's ogr2ogr.
    subprocess.run(
        [
            "ogr2ogr",
            "-t_srs",
            "crs:84",
            "-f",
            "GeoJSON",
            newfilename,
            file,
        ],
        check=True,
    )
    with open(newfilename, "r") as geojson_file:
        geojson = json.load(geojson_file)
    state = "va"
    district_type = "sldl"
    for feature in geojson["features"]:
        # District number; the OCD ID is built directly from it.
        n = feature["properties"]["District_N"]
        feature["properties"] = {
            "ocdid": f"ocd-division/country:us/state:va/sldl:{n}",
            "type": district_type,
            "state": state,
            "name": str(n),
        }
    output_filename = f"data/geojson/{state}-{district_type}.geojson"
    print(f"{newfilename} => {output_filename}")
    with open(output_filename, "w") as geojson_file:
        json.dump(geojson, geojson_file)
if __name__ == "__main__":
    # Ensure the output directory exists.
    try:
        os.makedirs("./data/geojson")
    except FileExistsError:
        pass
    # Expected number of downloaded TIGER shapefiles (sanity check).
    expected = 102
    if len(sys.argv) == 1:
        # No explicit files given: process every TIGER shapefile.
        files = sorted(glob.glob("data/source/tl*.shp"))
        if len(files) != expected:
            raise AssertionError(f"Expecting {expected} shapefiles, got {len(files)}).")
    else:
        files = sys.argv[1:]
    # Virginia's lower house uses a special remedial file, handled separately.
    process_va_lower("data/source/va_lower_remedial_2019.shp")
    for file in files:
        newfilename = file.replace(".shp", ".geojson")
        if os.path.exists(newfilename):
            print(newfilename, "already exists, skipping")
        else:
            print(file, "=>", newfilename)
            # Convert SHP -> GeoJSON, dropping undefined ('ZZ') districts.
            subprocess.run(
                [
                    "ogr2ogr",
                    "-where",
                    "GEOID NOT LIKE '%ZZ'",
                    "-t_srs",
                    "crs:84",
                    "-f",
                    "GeoJSON",
                    newfilename,
                    file,
                ],
                check=True,
            )
        merge_ids(newfilename)
| 28.840909 | 117 | 0.556147 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,601 | 0.315406 |
984978327bec13adb800652d36ac141bd0b2ae02 | 249 | py | Python | mask_detection/api/serializers.py | vinaykudari/MaskDetectionAPI | ab0c974614984b34cbb98d4d3ea247d8a9d70a14 | [
"MIT"
] | 1 | 2022-03-15T10:11:04.000Z | 2022-03-15T10:11:04.000Z | mask_detection/api/serializers.py | vinaykudari/MaskDetectionAPI | ab0c974614984b34cbb98d4d3ea247d8a9d70a14 | [
"MIT"
] | 5 | 2021-04-08T21:32:19.000Z | 2022-03-12T00:43:06.000Z | mask_detection/api/serializers.py | vinaykudari/mask-detection-api | ab0c974614984b34cbb98d4d3ea247d8a9d70a14 | [
"MIT"
] | 1 | 2021-03-08T07:57:17.000Z | 2021-03-08T07:57:17.000Z | import PIL
from rest_framework import serializers
from rest_framework.exceptions import ParseError
from .models import Image
class ImageSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Image
fields = ('pk', 'image', )
| 20.75 | 62 | 0.795181 | 120 | 0.481928 | 0 | 0 | 0 | 0 | 0 | 0 | 11 | 0.044177 |
9849a7627e1606395ffece5888724940eb62f35a | 2,841 | py | Python | mod_audio.py | NUSTEM-UK/Robot-Orchestra-3 | 994d52411ea3c7b48d067dcf7066f6578e8bc512 | [
"MIT"
] | null | null | null | mod_audio.py | NUSTEM-UK/Robot-Orchestra-3 | 994d52411ea3c7b48d067dcf7066f6578e8bc512 | [
"MIT"
] | null | null | null | mod_audio.py | NUSTEM-UK/Robot-Orchestra-3 | 994d52411ea3c7b48d067dcf7066f6578e8bc512 | [
"MIT"
] | null | null | null | import glob
import os
import re
import pygame
from math import log2, pow
try:
pygame.init() # Throws error in pylint: security issue for C module. Ignore.
except ImportError:
exit("This script requires the pygame module\nInstall with: sudo pip3 install pygame")
BANK = os.path.join(os.path.dirname(__file__), "sounds")
NOTE_OFFSET = 0
FILETYPES = ['*.wav', '*.ogg']
samples = []
files = []
# Tuning, and constants for freq-to-note conversion
A4 = 440
C0 = A4*pow(2, -4.75)
name = ["C", "C#", "D", "D#", "E", "F", "F#", "G", "G#", "A", "A#", "B"]
pygame.mixer.pre_init(44100, -16, 1, 512)
pygame.mixer.init()
pygame.mixer.set_num_channels(256)
patches = glob.glob(os.path.join(BANK, '*'))
print(patches)
patch_index = 0
if len(patches) == 0:
exit("Couldn't find any .wav files in {}".format(BANK))
def natural_sort_key(s, _nsre=re.compile('([0-9]+)')):
    """Sort key that orders embedded numbers numerically (file2 < file10)."""
    key = []
    for chunk in _nsre.split(s):
        key.append(int(chunk) if chunk.isdigit() else chunk.lower())
    return key
def load_samples(patch):
    """Load audio samples into buffers for playback.

    Populates the module-level ``files``, ``samples``, ``octaves`` and
    ``octave`` globals from the files in the *patch* directory that match
    FILETYPES (*.wav, *.ogg).
    """
    global samples, files, octave, octaves
    files = []
    print('Loading samples from: {}'.format(patch))
    for filetype in FILETYPES:
        files.extend(glob.glob(os.path.join(patch, filetype)))
    # Natural sort so e.g. sample2 precedes sample10 (keeps note order).
    files.sort(key=natural_sort_key)
    # 12 samples per octave; may be fractional if the bank is incomplete.
    octaves = len(files) / 12
    samples = [pygame.mixer.Sound(sample) for sample in files]
    # Start playback in the middle octave of the loaded range.
    octave = int(octaves / 2)
def handle_note(channel, octave):
    """Synthesise a commanded note by playing the matching Pygame sample."""
    sample_index = channel + (12 * octave) + NOTE_OFFSET
    if sample_index >= len(samples):
        print('Note out of bounds')
        return
    print('Playing sound: {}'.format(sample_index))
    samples[sample_index].play(loops=0)
def handle_octave_up():
    """Raise the active octave by one, bounded by the number of loaded octaves."""
    global octave
    # NOTE(review): `octaves` is a float (len(files)/12); the comparison still
    # bounds the value, and handle_note() range-checks again before playback.
    if octave < octaves:
        octave += 1
    print('Selected Octave: {}'.format(octave))
def handle_octave_down():
    """Lower the active octave by one, never going below zero."""
    global octave
    if octave > 0:
        octave -= 1
    print('Selected Octave: {}'.format(octave))
def scale_up(notes, delay):
    """Play an ascending scale of `notes` notes in the current octave.

    Bug fix: `sleep` was called without ever being imported in this module
    (a guaranteed NameError); it is now imported from `time` at the top of
    the file.
    """
    global octave
    for note in range(notes):
        handle_note(note, octave)
        sleep(delay)
def scale_down(notes, delay):
    """Play a descending scale of `notes` notes in the current octave.

    Bug fixes: `sleep` was never imported (NameError; now imported from
    `time` at the top of the file), and the original played indices
    `notes` .. 1 -- one above the intended range and skipping note 0.
    It now mirrors scale_up by playing `notes - 1` down to 0.
    """
    global octave
    for note in range(notes):
        handle_note(notes - 1 - note, octave)
        sleep(delay)
def freq_to_note(freq):
    """Return (note_name, octave) for a frequency *freq* in Hz.

    Uses the module-level tuning constants (A4/C0) and the `name` table.
    Based on https://www.johndcook.com/blog/2016/02/10/musical-pitch-notation/
    by John D. Cook
    """
    # h = semitones above C0 for the given frequency.
    h = round(12*log2(freq/C0))  # Python3 only
    # h = round(12*(log(freq/C0)/log(2)))
    # NOTE(review): standard pitch notation would be `h // 12` with no
    # offset; the extra -6 appears to shift the numbering to this module's
    # octave indexing -- confirm the intended convention.
    octave = (h // 12) - 6
    n = h % 12
    return name[int(n)], int(octave)
# Load audio samples
load_samples(patches[patch_index])
| 26.305556 | 90 | 0.644843 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 882 | 0.310454 |
9849f87cd04fbe8f9187c06f9c06b1a0a0ffe11c | 1,400 | py | Python | nds/emden.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | 12 | 2022-02-23T11:06:06.000Z | 2022-03-04T17:21:44.000Z | nds/emden.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | nds/emden.py | risklayer/corona-landkreis-crawler | 2e82448ff614240365de9493eafa0e6a620ac615 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
from botbase import *
# Count of new infections, e.g. "haben wir 12 Neuinfektionen" /
# "Gesundheitsamt 12 Neuinfektionen" (the count may also be a word).
_emden_cc = re.compile(r"(?:ha\w+en\swir\s(?:erneut\s)?|Gesundheitsamt)\s*([0-9.]+|\w+)\s+(?:Corona-\s*)?Neuinfektion(?:en)?")
# Totals sentence: overall cases, recovered (optional parenthesised delta)
# and deceased (optional parenthesised delta).
_emden = re.compile(r"([0-9.]+)\s(?:Personen|Infektionen)\s*,\svon\sdenen\s*([0-9.]+)\s(?:\(\+?(-?\s*[0-9.]+)\)\s)?Personen\sgenesen\sund\s([0-9.]+)\s(?:\(\+?(-?\s*[0-9.]+)\)\s)?Personen\sverstorben")
# Number of people currently in quarantine.
_emden_q = re.compile(r"[Ii]n\sQuarantäne\sbefind\w+\s(?:\w+\s)*([0-9.]+)", re.U)
def emden(sheets):
    """Scrape the city of Emden news feed for the latest COVID-19 figures.

    Finds the newest article whose headline mentions "Neuinfektion",
    extracts case/recovered/death/quarantine counts via the module-level
    regexes, and pushes them to district id 3402 (presumably Emden's AGS
    code -- confirm) in the sheets.
    """
    soup = get_soup("https://www.emden.de/nachrichten")
    articles = soup.findAll(itemtype="http://schema.org/Article")
    # First article whose headline mentions new infections.
    article = next(a for a in articles if "Neuinfektion" in a.find("h3").get_text())
    date = article.find("time").text if article else None
    date = check_date(date, "Emden")
    url = urljoin("https://www.emden.de/", article.find("a")["href"])
    print("Getting", url)
    assert url
    soup = get_soup(url)
    # Flatten the article body into one newline-joined text blob for the regexes.
    text = "\n".join(p.get_text(" ") for p in soup.find(itemprop="articleBody").findAll("p"))
    #print(text)
    c, g, gg, d, dd = map(force_int, _emden.search(text).groups())
    cc = force_int(_emden_cc.search(text).group(1))
    q = force_int(_emden_q.search(text).group(1))
    update(sheets, 3402, c=c, cc=cc, d=d, dd=dd, g=g, q=q, sig="Bot", ignore_delta=True)
    return True
schedule.append(Task(9, 18, 13, 35, 600, emden, 3402))
if __name__ == '__main__': emden(googlesheets())
| 50 | 200 | 0.622857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 535 | 0.38187 |
984a2d55e759402f874e2e89a623446559ac02f3 | 632 | py | Python | fedhf/component/evaluator/__init__.py | beiyuouo/FedHF | 0caa873a5db7494b0f9197848c34243fcb8c49f6 | [
"Apache-2.0"
] | 2 | 2021-12-15T07:29:57.000Z | 2021-12-20T02:37:35.000Z | fedhf/component/evaluator/__init__.py | beiyuouo/FedHF | 0caa873a5db7494b0f9197848c34243fcb8c49f6 | [
"Apache-2.0"
] | null | null | null | fedhf/component/evaluator/__init__.py | beiyuouo/FedHF | 0caa873a5db7494b0f9197848c34243fcb8c49f6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : fedhf\component\evaluator\__init__.py
# @Time : 2022-05-03 16:00:21
# @Author : Bingjie Yan
# @Email : bj.yan.pa@qq.com
# @License : Apache License 2.0
__all__ = ["Evaluator", "build_evaluator", "evaluator_factory", "BaseEvaluator"]
from .evaluator import Evaluator
from .base_evaluator import BaseEvaluator
evaluator_factory = {'evaluator': Evaluator, 'base_evaluator': BaseEvaluator}
def build_evaluator(name):
    """Look up an evaluator class by its registry name.

    Args:
        name: key into ``evaluator_factory`` ('evaluator' or 'base_evaluator').

    Raises:
        ValueError: if *name* is not a registered evaluator.
    """
    # Membership test on the dict itself; no need for .keys().
    if name not in evaluator_factory:
        raise ValueError(f'Unknown evaluator name: {name}')
    return evaluator_factory[name]
984b4632ec6c3c6b385d7464735d5005831a461f | 11,462 | py | Python | RAISoft/gui_scripts/ModulatedPhotocurrent.py | daveraees/EMA_Test_Lab | a3073c5ec205d6ee327a993b38e92698c12cb0a6 | [
"MIT"
] | null | null | null | RAISoft/gui_scripts/ModulatedPhotocurrent.py | daveraees/EMA_Test_Lab | a3073c5ec205d6ee327a993b38e92698c12cb0a6 | [
"MIT"
] | null | null | null | RAISoft/gui_scripts/ModulatedPhotocurrent.py | daveraees/EMA_Test_Lab | a3073c5ec205d6ee327a993b38e92698c12cb0a6 | [
"MIT"
] | null | null | null | # project libraries imports:
# instruments:
from GenericScript import TestScript
class Test(TestScript):
def __init__(self):
TestScript.__init__(self)
self.Name = 'Modulated photocurrent frequency spectrum'
self.Description = """Measurement of the modulation frequency spectrum of photocurrent"""
return
    def init_parameters(self):
        """
        Create the list of script parameters and initialize their default values.

        Covers optical settings (wavelength, lamp drive), the modulation
        frequency sweep, timing/settling delays and the chopper device choice.
        """
        #voltages, acquizDelay=0.1, voltStepDelay=5
        self.generate_parameter(Name='Tested wavelength',
                                Unit='nm',
                                Type='float',
                                Iterable = False,
                                Limits = [ 1200, 200, None],
                                Description='Wavelengths to irradiate the sample with')
        self.set_parameter_value('Tested wavelength', 800)
        # Iterable=True: the script sweeps over this list of frequencies.
        self.generate_parameter(Name='Tested frequencies',
                                Unit='Hz',
                                Type='float',
                                Iterable = True,
                                Limits = [ 1e5, 0.01, None],
                                Description='Light chopper modulation frequencies')
        #self.set_parameter_value('Tested frequencies', 64)
        self.generate_parameter(Name='Lamp voltage',
                                Unit='Volts',
                                Type='float',
                                Iterable = False,
                                Limits = [ 20, 0.0, None],
                                Description='Voltage to be applied to the light source (Tungsten Lamp, or LED)')
        self.set_parameter_value('Lamp voltage', 20.0)
        self.generate_parameter(Name='Lamp voltage offset',
                                Unit='Volts',
                                Type='float',
                                Iterable = False,
                                Limits = [ 5, 0.0, None],
                                Description='Offset voltage to be applied to the LED light source in case of Agilent Waveform generator is used')
        self.set_parameter_value('Lamp voltage offset', 0.0)
        # Acquisition delay is restricted to the fixed choice list in Limits[2].
        self.generate_parameter(Name='Acquisition delay',
                                Unit='Seconds',
                                Type='float',
                                Iterable = False,
                                Limits = [ None, None, [0.01, 0.1, 0.2, 0.5, 1.0, 2.0, 5.0]],
                                Description='delay of the data acquizition loop in seconds')
        self.set_parameter_value('Acquisition delay', 2.0)
        self.generate_parameter(Name='Settle time frequency',
                                Unit='Seconds',
                                Type='float',
                                Iterable = False,
                                Limits = [ None, 0.001, None],
                                Description='Delay to settle signal at each frequency in seconds')
        self.set_parameter_value('Settle time frequency', 5.0)
        self.generate_parameter(Name='Settle time DC volts',
                                Unit='Seconds',
                                Type='float',
                                Iterable = False,
                                Limits = [ None, 0.001, None],
                                Description='Delay before the irradiation starts, with applied voltage to settle the current (in seconds)')
        self.set_parameter_value('Settle time DC volts', 5.0)
        self.generate_parameter(Name='DC voltage',
                                Unit='Volts',
                                Type='float',
                                Iterable = False,
                                Limits = [ 10.5, -10.5, None],
                                Description='DC voltage to be applied to the sample from AUX OUT 1 of the LockIn Apmlifier')
        self.set_parameter_value('DC voltage', 0.0)
        # Type='name': value restricted to one of the names in Limits[2].
        self.generate_parameter(Name='Chopper device',
                                Unit='',
                                Type='name',
                                Iterable = False,
                                Limits = [ None, None, ['Wheel chopper SR530','Agilent 33220A Waveform generator']],
                                Description="""Device to provide modulation of irradiation.
                                In case of Wheel chopper, the wavelength will be adjusted by monochromator
                                and chopper frequency will be adjusted by LockIn AUX OUT 2 (0.6 - 10.5 V)
                                connected to the control voltage input of the wheel controler.
                                In case of Agilent 33220A Waveform generator, the set frequencies will be selected
                                on the output terminal of the device, e.g. to feed LED""")
        self.set_parameter_value('Chopper device', 'Wheel chopper SR530')
        return
    def init_devices(self):
        """
        Instantiate the measurement instruments and register them as active.

        The chopper-dependent instruments are selected from the
        'Chopper device' parameter; the photodiode, clock and thermometer
        are always created.
        """
        from Devices import Clock, Thermometer, PowerSource,\
            Monochromator, LockIn,\
            HamamatsuPhotodiode, GeneratorSquare
        self.Channel = LockIn()
        self.append_active_devices_list(self.Channel)
        deviceName = self.get_parameter_value('Chopper device')
        if deviceName == 'Wheel chopper SR530':
            # Monochromator selects the wavelength; lamp driven by a DC source.
            self.Filter = Monochromator()
            self.append_active_devices_list(self.Filter)
            self.Lamp = PowerSource()
            self.append_active_devices_list(self.Lamp)
        if deviceName == 'Agilent 33220A Waveform generator':
            # LED light source modulated directly by the waveform generator.
            self.Filter = Monochromator()
            self.append_active_devices_list(self.Filter)
            self.Chopper = GeneratorSquare()
            self.append_active_devices_list(self.Chopper)
        # NOTE(review): if deviceName matches neither option, self.Filter /
        # self.Lamp / self.Chopper are never created and later code will
        # fail with AttributeError -- the parameter's choice list should
        # make this impossible, but confirm.
        #from instruments.Keithley import Electrometer617
        #from instruments.Agilent import A34401_4ohm
        ################################################################
        ## Here begins the initialization of devices ##################
        ################################################################
        # define and ACTIVATE the instruments
        #PeltierP = PID_CBdriver(TCchannel=1, count=20)
        #PeltierP.Activate()
        self.LightMeter = HamamatsuPhotodiode()
        self.append_active_devices_list(self.LightMeter)
        self.Stopwatch = Clock()
        self.append_active_devices_list(self.Stopwatch)
        self.Temper = Thermometer()
        self.append_active_devices_list(self.Temper)
    def _run(self):
        """Run the modulated photocurrent spectrum measurement.

        Applies a DC bias to the sample, irradiates it at the configured
        wavelength, sweeps the modulation frequency over the configured
        list, and records the lock-in signal at each frequency.
        (The previous docstring said "current-voltage characteristics",
        which does not match the code below.)
        """
        ################################################################
        ## Here begins the experimental part ###########################
        ################################################################
        # Read all user-configurable parameters up front.
        deviceName = self.get_parameter_value('Chopper device')
        Wavelength = self.get_parameter_value('Tested wavelength')
        Frequencies = self.get_parameter_value('Tested frequencies')
        DCvoltage = self.get_parameter_value('DC voltage')
        LampVolt = self.get_parameter_value('Lamp voltage')
        LampOffset = self.get_parameter_value('Lamp voltage offset')
        WaitDC = self.get_parameter_value('Settle time DC volts')
        WaitWL = self.get_parameter_value('Settle time frequency')
        WaitDAQ = self.get_parameter_value('Acquisition delay')
        # parameters for recalculation of the frequencies to the voltages
        # (wheel-chopper control voltage = freq * coeff + offset):
        WheelChopperVoltageCoeff = 0.005
        WheelChopperVoltageOffset = 0.0
        self.datastore.report('Using device "%(device)s" for modulation' % \
                                 {'device':deviceName})
        # initializing the measurement for given instrument:
        if deviceName == 'Wheel chopper SR530':
            self.Filter.close_shutter()
            self.Filter.set_wave(Wavelength)
            self.Lamp.set_volts(LampVolt)
            self.Lamp.set_amperes_limit(10)
            self.Lamp.set_output_on()
            # calculate the voltages set to regulate frequencies
            self.Channel.set_aux_output(2, 1.0)
            self.Filter.open_shutter()
            self.Stopwatch.Wait(3)
        # initializing the measurement for given instrument:
        if deviceName == 'Agilent 33220A Waveform generator':
            self.Filter.close_shutter()
            self.Filter.set_wave(Wavelength)
            self.Chopper.set_duty_cycle(50)
            self.Chopper.set_freq(1000)
            # Square-wave drive rides on a DC offset so the amplitude is
            # the span above the offset level.
            LEDamplitude = LampVolt - LampOffset
            LEDoffset = LampOffset
            self.Chopper.set_amplitude_vrms(LEDamplitude)
            self.Chopper.set_offset(LEDoffset)
            self.Chopper.set_output_on()
            self.Filter.open_shutter()
            self.Stopwatch.Wait(3)
        # apply DC voltage to the sample and
        # measure during 1kHz irradiation
        self.datastore.report('Stabilizing the DC voltage at the sample, irradiation modulation at 1 kHz' )
        self.Channel.set_aux_output(channel=1, voltage=DCvoltage)
        self.datastore.report('Irradiation wavelength %0.1f nm' % Wavelength )
        self.observe(WaitDC,WaitDAQ)
        #self.datastore.report ('estimation of LockIn amplifier gain for desired Frequencies:')
        self.datastore.report('Starting the modulated photocurrent spectra measurement from %(from)f to %(to)f Hz' % \
                                 {'from':Frequencies[0], 'to':Frequencies[-1]})
        self.datastore.report ('Starting Frequency scanning:')
        for freq in Frequencies:
            if deviceName == 'Wheel chopper SR530':
                ChopperVoltage = freq * WheelChopperVoltageCoeff + WheelChopperVoltageOffset
                self.Channel.set_aux_output(channel=2, voltage=ChopperVoltage)
            if deviceName == 'Agilent 33220A Waveform generator':
                self.Chopper.set_freq(freq)
            self.Stopwatch.Wait(0.1)
            self.datastore.report ('Set New frequency: %0.1f' % freq)
            # Auto-range the lock-in gain only above ~10 Hz; at lower
            # frequencies the previously found gain is kept.
            if freq > 9.99:
                gain = self.Channel.auto_gain()
                self.datastore.report('Found new Lock-In Amplifier GAIN: %d' % gain)
            else:
                gain = self.Channel.get_gain()
                self.datastore.report('Kept old Lock-In Amplifier GAIN: %d' % gain)
            # NOTE(review): minimumWait (settle time + 10 signal periods)
            # is computed but never used; observe() below is called with
            # WaitWL only -- confirm whether minimumWait was intended.
            minimumWait = WaitWL + 10/freq
            self.observe(WaitWL, WaitDAQ)
            self.datastore.separateData()
        self.datastore.report ('Experiment finished')
        # Remove the DC bias from the sample.
        self.Channel.set_aux_output(channel=1, voltage=0.0)
        if deviceName == 'Wheel chopper SR530':
            #self.Lamp.set_output_off()
            #self.Filter.close_shutter()
            self.Channel.set_aux_output(channel=2, voltage=1.0)
        if deviceName == 'Agilent 33220A Waveform generator':
            #self.Chopper.set_output_off()
            pass
        #self.datastore.report('finished the Modulated frequency photocurrent spectrum measurement')
        return
# Register this experiment class with the global script registry so it is
# selectable in the application under the name 'Photoconductivity'.
# (`Test` is the experiment class defined above this chunk.)
from AllScripts import ScriptsBase
ScriptsBase.add_script(Test, 'Photoconductivity')
984b753e7423e80490851d6ebcaf98c1c5568d93 | 1,102 | py | Python | ComparativeGenomics/PAML_Analyze/bed_extract_ensembl_NCBIGeneID.py | ajshultz/avian-immunity | 6b8a00aaa1a08d89bfb9cf3458a72188ade7cb4f | [
"MIT"
] | 4 | 2019-09-18T10:46:29.000Z | 2021-03-16T14:55:38.000Z | ComparativeGenomics/PAML_Analyze/bed_extract_ensembl_NCBIGeneID.py | elifesciences-publications/avian-immunity | 6b8a00aaa1a08d89bfb9cf3458a72188ade7cb4f | [
"MIT"
] | null | null | null | ComparativeGenomics/PAML_Analyze/bed_extract_ensembl_NCBIGeneID.py | elifesciences-publications/avian-immunity | 6b8a00aaa1a08d89bfb9cf3458a72188ade7cb4f | [
"MIT"
] | 4 | 2019-01-18T10:33:29.000Z | 2020-02-13T18:51:21.000Z | #! /usr/bin/env python
import sys
import os

# This script reads a merged BED file that carries both Ensembl gene IDs
# and NCBI gene IDs in its attribute fields and writes a two-column
# translation table (NCBI GeneID -> Ensembl ID).
#
# Usage: bed_extract_ensembl_NCBIGeneID.py <input.bed> <output.tsv>


def _extract_id(fields, token, sep, prefix):
    """Return the identifier following *prefix* in the first pipe-separated
    field that contains *token*, after splitting that field on *sep*.

    Mirrors the original two-step search: first locate the field holding
    the token, then locate the sub-field holding it, then strip the prefix.
    """
    field_pos = [i for i, j in enumerate(fields) if token in j]
    parts = fields[field_pos[0]].split(sep)
    part_pos = [i for i, j in enumerate(parts) if token in j]
    return parts[part_pos[0]].split(prefix)[-1]


bedfile = sys.argv[1]
outfile = sys.argv[2]

# `with` guarantees both files are closed even if a line fails to parse.
with open(bedfile, "r") as bed, open(outfile, "w") as output:
    output.write("NCBIGeneID\tENSEMBLID\n")
    for line in bed:
        line = line.strip()
        # Only lines carrying both an NCBI GeneID and an Ensembl gene ID
        # can be translated.
        if "GeneID" in line and "ENSGALG" in line:
            fields = line.split("|")
            # NCBI ID lives in a comma-separated field as "...GeneID:<id>".
            geneid = _extract_id(fields, "GeneID", ",", "GeneID:")
            # Ensembl ID lives in a semicolon-separated field as
            # "ID=gene:<id>".
            ensid = _extract_id(fields, "ENSGALG", ";", "ID=gene:")
            output.write(geneid + "\t" + ensid + "\n")
984c178778bd5a8ed38c3fb9100e9a5a25991cad | 3,050 | py | Python | ok_redirects/models.py | LowerDeez/ok-redirects | 5eb317a8aeae70a4899a06c4eefa9a088f269a03 | [
"MIT"
] | 1 | 2021-01-04T08:58:09.000Z | 2021-01-04T08:58:09.000Z | ok_redirects/models.py | LowerDeez/ok-redirects | 5eb317a8aeae70a4899a06c4eefa9a088f269a03 | [
"MIT"
] | null | null | null | ok_redirects/models.py | LowerDeez/ok-redirects | 5eb317a8aeae70a4899a06c4eefa9a088f269a03 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import pgettext_lazy
from .constants import REDIRECT_TYPE_CHOICES, REDIRECT_301
from .fields import MultipleChoiceArrayField
# Public API of this module.
__all__ = (
    'Redirect',
)

# All configured language choices; empty list when the setting is absent.
LANGUAGES = getattr(settings, 'LANGUAGES', [])
def _default_languages():
    """Return a fresh list of all configured language codes.

    Used as the ``default`` for ``Redirect.languages``.  A callable is
    preferred over a list literal so that every model instance receives
    its own list — a shared mutable field default is flagged by Django
    system check ``fields.E010`` and can leak state between instances.
    Returns ``[]`` when ``settings.LANGUAGES`` is empty, matching the
    previous ``list`` fallback.
    """
    return [lang[0] for lang in LANGUAGES]


class Redirect(models.Model):
    """A per-site HTTP redirect rule.

    Sends requests for ``old_path`` on ``site`` to ``new_path`` with
    ``status_code``, optionally restricted to a subset of request
    languages; ``counter`` records how many times the redirect fired.
    """

    site = models.ForeignKey(
        Site,
        models.CASCADE,
        verbose_name=pgettext_lazy("ok:redirects", 'site')
    )
    old_path = models.CharField(
        pgettext_lazy("ok:redirects", 'redirect from'),
        max_length=250,
        db_index=True,
        help_text=pgettext_lazy(
            "ok:redirects",
            "This should be an absolute path, "
            "excluding the domain name. Example: '/events/search/'."
        ),
    )
    # Languages for which this redirect applies; defaults to every
    # configured language (fresh list per instance — see
    # _default_languages above).
    languages = MultipleChoiceArrayField(
        models.CharField(
            max_length=2,
            choices=LANGUAGES,
            blank=True
        ),
        blank=True,
        default=_default_languages,
        verbose_name=pgettext_lazy("ok:redirects", "Languages to check redirect")
    )
    is_ignore_get_params = models.BooleanField(
        pgettext_lazy("ok:redirects", 'Ignore GET parameters'),
        default=True
    )
    new_path = models.CharField(
        pgettext_lazy("ok:redirects", 'redirect to'),
        blank=True,
        max_length=250,
        help_text=pgettext_lazy(
            "ok:redirects",
            "This can be either an absolute path (as above) "
            "or a full URL starting with 'http://'."
        ),
    )
    to_language = models.CharField(
        pgettext_lazy("ok:redirects", 'to language'),
        blank=True,
        choices=LANGUAGES,
        max_length=5,
        help_text=pgettext_lazy(
            "ok:redirects",
            "Leave blank to redirect to the current language on the site"
        ),
    )
    status_code = models.PositiveSmallIntegerField(
        db_index=True,
        choices=REDIRECT_TYPE_CHOICES,
        default=REDIRECT_301,
        verbose_name=pgettext_lazy("ok:redirects", 'Status code'),
        help_text=pgettext_lazy(
            "ok:redirects",
            'The redirect http status code.'
        )
    )
    # Number of times this redirect has been served.
    counter = models.PositiveIntegerField(
        blank=True,
        default=0,
        verbose_name=pgettext_lazy("ok:redirects", 'Counter'),
    )
    is_active = models.BooleanField(
        pgettext_lazy("ok:redirects", 'Is active'),
        default=True,
        db_index=True,
    )

    class Meta:
        db_table = 'ok_redirects'
        ordering = ('old_path',)
        unique_together = (('site', 'old_path'),)
        verbose_name = pgettext_lazy("ok:redirects", 'redirect')
        verbose_name_plural = pgettext_lazy("ok:redirects", 'redirects')

    def __str__(self):
        return (
            f"{pgettext_lazy('ok:redirects', 'Redirect')} "
            f"{self.status_code}: "
            f"`{self.old_path}` ---> `{self.new_path}`"
        )
| 30.5 | 81 | 0.605902 | 2,705 | 0.886885 | 0 | 0 | 0 | 0 | 0 | 0 | 810 | 0.265574 |
984c38c3e2d1f35b3e36c250e8706e494118b61b | 4,330 | py | Python | slide/D-g-conv.py | tribbloid/convnet-abstraction | 8827c1dd8b19462bd0499cd0a8d81a8688e7fc48 | [
"Apache-2.0"
] | 5 | 2019-02-11T20:39:05.000Z | 2022-01-11T13:25:45.000Z | slide/D-g-conv.py | tribbloid/convnet-abstraction | 8827c1dd8b19462bd0499cd0a8d81a8688e7fc48 | [
"Apache-2.0"
] | null | null | null | slide/D-g-conv.py | tribbloid/convnet-abstraction | 8827c1dd8b19462bd0499cd0a8d81a8688e7fc48 | [
"Apache-2.0"
] | 1 | 2019-06-29T11:45:30.000Z | 2019-06-29T11:45:30.000Z | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.2'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% {"slideshow": {"slide_type": "skip"}}
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# %% {"slideshow": {"slide_type": "skip"}}
import os
import sys
from typing import Tuple
from dataclasses import dataclass

# Make the project's helper modules importable: drop the implicit ''
# entry and add ../python (relative to the notebook) to sys.path.
if '' in sys.path:
    sys.path.remove('')
module_path = os.path.abspath(os.path.join('../python'))
if module_path not in sys.path:
    sys.path.append(module_path)

import networkx as nx
from graphPlot import drawGraph, setCanvas
from const import *

# Configure plot defaults for the slides.
setCanvas()
# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## G-ConvNet
#
# $$f_+(y) = <A_{ug} \circ f(x), w_0(x)> _x$$
#
# - this implies bijection/isomorphism $y \longleftrightarrow A_{ug}$
# - ... and high-level features usually have more dimensions $\{x\} \subset \{y\}$
#
# ---
#
# All of the followings are concrete subclasses:
#
#
# Augmentation types | Answer
# --- | ---
# 2d translation | ConvNet
# **others** | **G-ConvNet**
# - 2d translation + 90$^{\circ}$ rotation | Group Equivariant CNNs
# - 2d translation + rotation | Harmonic Net
# - 3d rotation | Spherical CNNs
# - 3d translation + rotation | Tensor Field Net
#
#
# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## ConvNet
#
# | - | Input $f(x)$ | High-level $f_+(y)$, $f_{++}(z)$, ... | Augmentation $A_{ug}$, $U_{ga}$, ...
# | ---|---------------|----------------|-------------------------------
# | domain | $R^2$ | $R^2$ | $R^2$ (translation only)
#
# ---
#
# - First of its kind but not the last
# - A rare case when high-level feature domain $\{y\} = \{x\}$, in all other cases $\{y\} \supset \{x\}$
#
# <img src="assets/alexnet.png">
#
# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## ConvNet
# %% {"slideshow": {"slide_type": "-"}}
# ConvNet as a chain graph: each feature map feeds the next through a
# "conv" edge.  (`plt` comes in via `from const import *` above —
# presumably matplotlib.pyplot; confirm in const.py.)
g = nx.DiGraph(directed=True)
nodes = [
    "$f: R^2$",
    "$f_+: R^2$",
    "$f_{++}: R^2$",
    "$f_{+++}: R^2$"
]
for i in range(0, len(nodes) - 1):
    g.add_edge(nodes[i], nodes[i + 1], text="conv")
drawGraph(g)
plt.show()
# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Group Equivariant CNNs (ICML 2016*)
#
# | - | Input $f(x)$ | High-level $f_+(y)$, $f_{++}(z)$, ... | Augmentation $A_{ug}$, $U_{ga}$, ...
# | --- |---|---|---
# | domain | $R^2$ | $R^2 \times p4$ | $R^2 \times p4$ (translation, rotation $\pm 90^{\circ}$)
#
# ---
#
# - change looks trivial
#
# <img src="assets/r2p4.png" width="500">
#
# ---
#
# [*] T. S. Cohen and M. Welling, “Group Equivariant Convolutional Networks,” ICML 2016.
# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Group Equivariant CNNs (ICML 2016*)
# %% {"slideshow": {"slide_type": "-"}}
# G-ConvNet (p4 group) graph: start from the input feature map and
# branch over the four 90-degree rotation angles.
g = nx.DiGraph(directed=True)
tail = "$f: R^2$"
angles = [-90, 0, 90, 180]
def regularize(v: int) -> int:
    """Normalize an angle in degrees into the half-open range (-180, 180]."""
    result = v
    # Fold by whole turns until the angle lands in (-180, 180].
    while result > 180:
        result -= 360
    while result <= -180:
        result += 360
    return result
def repr(v: int) -> str:
    """Format an angle (degrees) as a signed LaTeX degree label.

    NOTE: shadows the builtin ``repr``; the name is kept because the
    plotting cells below call it as ``repr``.
    """
    normalized = regularize(v)
    sign = "+" if normalized > 0 else ""
    return f"{sign}{str(normalized)}^{{\circ}}"
# First layer: one feature node per rotation angle, fed from the input.
sub = "+"
subPlus = ""
for i in angles:
    node = f"$f_{{{sub}}} | {repr(i)}$"
    g.add_edge(tail, node, text=f"${repr(i)}$")
# Two more layers: connect every angle node to every angle node of the
# next layer; the edge label is the relative rotation between them.
for epoch in range(1, 3):
    subPlus = f"{sub}+"
    for i in angles:
        for j in angles:
            prev = f"$f_{{{sub}}} | {repr(i)}$"
            node = f"$f_{{{subPlus}}} | {repr(j)}$"
            g.add_edge(prev, node, text=f"${repr(j - i)}$")
    sub = subPlus

drawGraph(g, font='humor sans', label_pos=0.8)
plt.show()
# %% [markdown] {"slideshow": {"slide_type": "slide"}}
# ## Group Equivariant CNNs (ICML 2016*) - Alternatively
#
# | - | Input $f(x)$ | High-level $f_+(y)$, $f_{++}(z)$, ... | Augmentation $A_{ug}$, $U_{ga}$, ...
# | --- |---|---|---
# | domain | $R^2$ | $R^2 \times p4m$ | $R^2 \times p4m$ (translation, rotation $\pm 90^{\circ}$, flipping)
#
# ---
#
# - Size of filter bank start to become annoying, but still acceptable.
#
# <img src="assets/r2p4m.png" width="500">
#
# ---
#
# [*] T. S. Cohen and M. Welling, “Group Equivariant Convolutional Networks,” ICML 2016.
| 23.791209 | 107 | 0.550577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,069 | 0.707469 |
984cb898e2851daa433e2e4dd369d4cf167238af | 170 | py | Python | tests/transformer/test_assert.py | rahulbahal7/restricted-python | c39cffe71dfc30630e946977735303d3a65b0383 | [
"ZPL-2.1"
] | 236 | 2015-01-03T17:14:53.000Z | 2022-03-01T15:52:46.000Z | tests/transformer/test_assert.py | rahulbahal7/restricted-python | c39cffe71dfc30630e946977735303d3a65b0383 | [
"ZPL-2.1"
] | 149 | 2016-10-24T06:56:44.000Z | 2022-02-24T08:09:10.000Z | tests/transformer/test_assert.py | rahulbahal7/restricted-python | c39cffe71dfc30630e946977735303d3a65b0383 | [
"ZPL-2.1"
] | 30 | 2015-04-03T05:38:13.000Z | 2021-11-10T05:13:38.000Z | from tests.helper import restricted_exec
def test_RestrictingNodeTransformer__visit_Assert__1():
"""It allows assert statements."""
restricted_exec('assert 1')
| 24.285714 | 55 | 0.782353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 44 | 0.258824 |
984d0ccced450917c0989659a1f312215ef5e3a1 | 8,944 | py | Python | mnist/train_imbalanced_mnist.py | pfnet-research/robust_estimation | 9cb404f5ae80275e927ee5ccec3e3ea6099ff392 | [
"BSD-3-Clause"
] | 3 | 2018-11-26T11:16:41.000Z | 2021-02-01T12:44:01.000Z | mnist/train_imbalanced_mnist.py | pfnet-research/robust_estimation | 9cb404f5ae80275e927ee5ccec3e3ea6099ff392 | [
"BSD-3-Clause"
] | null | null | null | mnist/train_imbalanced_mnist.py | pfnet-research/robust_estimation | 9cb404f5ae80275e927ee5ccec3e3ea6099ff392 | [
"BSD-3-Clause"
] | 1 | 2018-11-27T04:51:41.000Z | 2018-11-27T04:51:41.000Z | import argparse
import os
import numpy as np
import chainer
from chainer import functions as F
from chainer import iterators
from chainer import optimizers
from chainer import training
from chainer.training import extensions as E
from chainer_chemistry.models.prediction import Classifier
from chainer_chemistry.training.extensions import ROCAUCEvaluator # NOQA
from net import LeNet
from updaters import Proposed, LRE
def get_imbalanced_data(n_train=1000, seed=111, dataset='mnist'):
    """Create a class-imbalanced training split of MNIST or CIFAR-10.

    Args:
        n_train: per-class sample count (an int applied to every class)
            or a dict mapping class label -> sample count; classes
            missing from the dict are also dropped from the test split.
        seed: seed for numpy's global RNG (note: mutates global state).
        dataset: 'mnist' selects MNIST, anything else selects CIFAR-10.

    Returns:
        Tuple of (train, test) ``TupleDataset`` objects.
    """
    np.random.seed(seed)
    if dataset == 'mnist':
        train, test = chainer.datasets.get_mnist(ndim=3)
    else:
        train, test = chainer.datasets.cifar.get_cifar10()
    x, t = chainer.dataset.concat_examples(train)
    # Normalise the count spec into a {class: count} mapping.
    if not isinstance(n_train, dict):
        n_train = {label: n_train for label in np.unique(t)}
    train_images = None
    train_labels = None
    for cls, count in n_train.items():
        # Randomly pick `count` samples of this class.
        chosen = np.random.permutation(np.where(t == cls)[0])[:count]
        if train_images is None:
            train_images = x[chosen]
            train_labels = t[chosen]
        else:
            train_images = np.concatenate((train_images, x[chosen]), axis=0)
            train_labels = np.concatenate((train_labels, t[chosen]), axis=0)
    # The test split keeps every sample of the selected classes.
    x, t = chainer.dataset.concat_examples(test)
    keep = np.isin(t, list(n_train.keys()))
    test_images = x[keep]
    test_labels = t[keep]
    train = chainer.datasets.tuple_dataset.TupleDataset(
        train_images, train_labels)
    test = chainer.datasets.tuple_dataset.TupleDataset(
        test_images, test_labels)
    return train, test
def get_binary_imbalanced_data(n_train={4: 5 * 5 - 5, 9: 995 * 5 - 5},
                               n_train_val={4: 5, 9: 5}, dataset='mnist'):
    """Build a two-class imbalanced split plus a small balanced val set.

    The source classes (digits 4 and 9 by default) are re-labelled 0 and
    1 in dict-key iteration order.

    Args:
        n_train: dict mapping source class -> number of training samples.
        n_train_val: dict mapping source class -> number of samples held
            out as the small "training validation" set (used by LRE).
        dataset: 'mnist' selects MNIST, anything else CIFAR-10.

    Returns:
        (train, train_val, test) ``TupleDataset`` tuples, all with
        binary labels.

    NOTE(review): the dict default arguments are shared across calls;
    they are not mutated here, but a None-sentinel would be safer.
    """
    if dataset == 'mnist':
        train, test = chainer.datasets.get_mnist(ndim=3)
    else:
        train, test = chainer.datasets.cifar.get_cifar10()
    x, t = chainer.dataset.concat_examples(train)
    x_test, t_test = chainer.dataset.concat_examples(test)
    if not isinstance(n_train, dict) or not isinstance(n_train_val, dict):
        raise TypeError
    # NOTE(review): np.unique(dict_keys) wraps the keys view in a 0-d
    # object array, so len(...) is always 1 and this guard can never
    # trigger — confirm what it was meant to check.
    if len(np.unique(n_train.keys())) >= 2:
        raise NotImplementedError
    train_images, train_labels = None, None
    train_val_images, train_val_labels = None, None
    test_images, test_labels = None, None
    for i, cls in enumerate(n_train.keys()):
        n1 = n_train[cls]      # training samples for this class
        n2 = n_train_val[cls]  # validation samples for this class
        indices = np.where(t == cls)[0]
        # Split a random permutation of this class's indices into
        # train / train-val; the remainder is discarded.
        train_ind, train_val_ind, _ = np.split(
            np.random.permutation(indices), np.cumsum([n1, n2]))
        train_images = x[train_ind] if train_images is None else \
            np.concatenate((train_images, x[train_ind]), axis=0)
        # Re-label this class as i (0 or 1).
        train_label = np.full(len(train_ind), i)
        train_labels = train_label if train_labels is None else \
            np.concatenate((train_labels, train_label), axis=0)
        train_val_images = x[train_val_ind] if train_val_images is None \
            else np.concatenate((train_val_images, x[train_val_ind]), axis=0)
        train_val_label = np.full(len(train_val_ind), i)
        train_val_labels = train_val_label if train_val_labels is None \
            else np.concatenate((train_val_labels, train_val_label), axis=0)
        # The test split keeps every sample of the selected classes.
        test_ind = np.where(t_test == cls)[0]
        test_images = x_test[test_ind] if test_images is None else \
            np.concatenate((test_images, x_test[test_ind]), axis=0)
        test_label = np.full(len(test_ind), i)
        test_labels = test_label if test_labels is None else np.concatenate(
            (test_labels, test_label), axis=0)
    train = chainer.datasets.tuple_dataset.TupleDataset(
        train_images, train_labels)
    train_val = chainer.datasets.tuple_dataset.TupleDataset(
        train_val_images, train_val_labels)
    test = chainer.datasets.tuple_dataset.TupleDataset(
        test_images, test_labels)
    return train, train_val, test
def main():
    """Train a binary LeNet classifier on the imbalanced 4-vs-9 split.

    Parses CLI options, builds the dataset/model/optimizer, selects one
    of three updaters ('standard', 'proposed', 'LRE'), and runs a
    chainer Trainer with accuracy and optional ROC-AUC reporting.
    """
    parser = argparse.ArgumentParser(
        description='Imbalanced MNIST classification')
    parser.add_argument('--eval-mode', type=int, default=1,
                        help='Evaluation mode.'
                        '0: only binary_accuracy is calculated.'
                        '1: binary_accuracy and ROC-AUC score is calculated')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='batch size')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID to use. Negative value indicates '
                        'not to use GPU and to run the code in CPU.')
    parser.add_argument('--out', '-o', type=str, default='result',
                        help='path to output directory')
    parser.add_argument('--epoch', '-e', type=int, default=10,
                        help='number of epochs')
    parser.add_argument('--resume', '-r', type=str, default='',
                        help='path to a trainer snapshot')
    parser.add_argument('--frequency', '-f', type=int, default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--protocol', type=int, default=2,
                        help='protocol version for pickle')
    parser.add_argument('--model-filename', type=str, default='classifier.pkl',
                        help='file name for pickled model')
    parser.add_argument('--updater-type', type=str, default='standard')
    parser.add_argument('--sampling-size', type=int, default=32)
    parser.add_argument('--optimizer-type', type=str, default='Adam')
    # NOTE(review): --alpha is parsed as a string and passed through to
    # LRE unchanged — confirm LRE accepts/converts a string alpha.
    parser.add_argument('--alpha', type=str, default='0.001')
    args = parser.parse_args()

    # Dataset preparation: imbalanced train set, small balanced
    # train-val set (for LRE), and full test set.
    train, train_val, val = get_binary_imbalanced_data()

    train_iter = iterators.SerialIterator(train, args.batchsize)
    val_iter = iterators.SerialIterator(val, args.batchsize,
                                        repeat=False, shuffle=False)

    # Single-output LeNet trained with sigmoid cross entropy.
    model = LeNet(n_class=1, binary=True)
    classifier = Classifier(model,
                            lossfun=F.sigmoid_cross_entropy,
                            metrics_fun=F.binary_accuracy,
                            device=args.gpu)

    if args.optimizer_type == 'Adam':
        optimizer = optimizers.Adam()
    else:
        optimizer = optimizers.SGD(lr=1e-3)
    optimizer.setup(classifier)

    # Select the parameter-update strategy.
    # NOTE(review): an unknown --updater-type leaves `updater` unbound
    # and raises NameError below — consider an explicit error.
    updater_type = args.updater_type
    if updater_type == 'standard':
        updater = training.StandardUpdater(
            train_iter, optimizer, device=args.gpu)
    elif updater_type == 'proposed':
        updater = Proposed(
            train_iter, optimizer, device=args.gpu,
            sampling_size=args.sampling_size)
    elif updater_type == 'LRE':
        # NOTE(review): x, t are computed but never used in this branch.
        x, t = chainer.dataset.concat_examples(train)
        train_val_iter = iterators.SerialIterator(train_val, len(train_val))
        updater = LRE(
            {'main': train_iter, 'val': train_val_iter}, optimizer,
            device=args.gpu, alpha=args.alpha)

    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)
    trainer.extend(E.Evaluator(val_iter, classifier,
                               device=args.gpu))
    trainer.extend(E.LogReport())

    eval_mode = args.eval_mode
    if eval_mode == 0:
        trainer.extend(E.PrintReport([
            'epoch', 'main/loss', 'main/accuracy', 'validation/main/loss',
            'validation/main/accuracy', 'elapsed_time']))
    elif eval_mode == 1:
        # Report ROC-AUC on both the (imbalanced) train set and the
        # test set, in addition to accuracy.
        train_eval_iter = iterators.SerialIterator(train, args.batchsize,
                                                   repeat=False, shuffle=False)
        trainer.extend(ROCAUCEvaluator(
            train_eval_iter, classifier, eval_func=model,
            device=args.gpu, name='train',
            pos_labels=1, ignore_labels=-1, raise_value_error=False))
        # extension name='validation' is already used by `Evaluator`,
        # instead extension name `val` is used.
        trainer.extend(ROCAUCEvaluator(
            val_iter, classifier, eval_func=model,
            device=args.gpu, name='val',
            pos_labels=1, ignore_labels=-1))
        trainer.extend(E.PrintReport([
            'epoch', 'main/loss', 'main/accuracy', 'train/main/roc_auc',
            'validation/main/loss', 'validation/main/accuracy',
            'val/main/roc_auc', 'elapsed_time']))
    else:
        raise ValueError('Invalid accfun_mode {}'.format(eval_mode))
    trainer.extend(E.ProgressBar(update_interval=10))

    # Snapshot every `frequency` epochs (default: once at the end).
    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(E.snapshot(), trigger=(frequency, 'epoch'))

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()

    # Persist the trained classifier for later inference.
    classifier.save_pickle(os.path.join(args.out, args.model_filename),
                           protocol=args.protocol)
# Script entry point: run training only when executed directly.
if __name__ == '__main__':
    main()
| 40.288288 | 79 | 0.633274 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,123 | 0.125559 |
984deaa3e520bef0fbb915ca5c77f71dc4dc260d | 307 | py | Python | driver/urls.py | mzazakeith/uber-clone | 02f0be423681be5517182b278176feede1126b7a | [
"MIT"
] | 1 | 2022-01-13T19:31:56.000Z | 2022-01-13T19:31:56.000Z | driver/urls.py | mzazakeith/uber-clone | 02f0be423681be5517182b278176feede1126b7a | [
"MIT"
] | 5 | 2020-02-12T01:28:33.000Z | 2021-06-10T20:44:23.000Z | driver/urls.py | mzazakeith/uber-clone | 02f0be423681be5517182b278176feede1126b7a | [
"MIT"
] | null | null | null | from django.conf.urls import url, include
from driver import views
# from djgeojson.views import GeoJSONLayerView
# from driver.models import Points
urlpatterns = [
url(r'^new/driver$', views.create_driver_profile, name='new-driver-profile'),
url(r'^new/car$', views.submit_car, name='new-car'),
] | 30.7 | 81 | 0.742671 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 136 | 0.442997 |
984f790a33323506d4783165dffb1e776abcf279 | 3,456 | py | Python | server/score_calculator/model_mean.py | moonhc/league-of-legend-win-prediction | 36f0a8d3d2710d23fa9b66004989cecaa64b127d | [
"MIT"
] | null | null | null | server/score_calculator/model_mean.py | moonhc/league-of-legend-win-prediction | 36f0a8d3d2710d23fa9b66004989cecaa64b127d | [
"MIT"
] | 1 | 2019-05-27T08:14:43.000Z | 2019-05-27T08:14:43.000Z | server/score_calculator/model_mean.py | SeongJinAhn/league-of-legend-win-prediction | 50898f928bf36fdbf6fedb8a6fc7f926d4a364a3 | [
"MIT"
] | 2 | 2019-05-12T13:34:35.000Z | 2019-05-23T14:24:44.000Z | import numpy as np
import tensorflow as tf
from utils import fc_block
from params import*
class Model():
    def __init__(self, input_dim=INPUT_DIM, output_dim=OUTPUT_DIM, dim_hidden=DIM_HIDDEN, latent_dim=LATENT_DIM, update_lr=LEARNING_RATE, scope='model'):
        # Network size configuration.
        self.input_dim = input_dim
        # NOTE(review): the `output_dim` argument is ignored and the
        # output dimension is hard-coded to 2 — confirm this is intended.
        self.output_dim = 2
        self.dim_hidden = dim_hidden
        self.latent_dim = latent_dim
        self.update_lr = update_lr
        # Variable-scope name used in construct_model.
        self.scope = scope
        # Selects which variables the optimizer updates in
        # construct_model (0: all weights; otherwise only the head).
        self.flag = 0
def construct_weights(self, scope=''):
weights = {}
weights['w1'] = tf.Variable(tf.random_normal([self.input_dim, 128], stddev=0.1), name=scope+'w1')
weights['b1'] = tf.Variable(tf.zeros([128]), name=scope+'b1')
weights['w2'] = tf.Variable(tf.random_normal([128, self.dim_hidden], stddev=0.1), name=scope+'w2')
weights['b2'] = tf.Variable(tf.zeros([self.dim_hidden]), name=scope+'b2')
weights['w5'] = tf.Variable(tf.random_normal([self.dim_hidden, self.dim_hidden], stddev=0.1), name=scope+'w5')
weights['b5'] = tf.Variable(tf.zeros([self.dim_hidden]), name=scope+'b5')
weights['w3'] = tf.Variable(tf.random_normal([self.dim_hidden, 64], stddev=0.1), name=scope+'w3')
weights['b3'] = tf.Variable(tf.zeros([64]), name=scope+'b3')
weights['w4'] = tf.Variable(tf.random_normal([64,self.output_dim], stddev=0.1), name=scope+'w4')
return weights
def forward(self, inp, weights, prob, reuse=False, scope='', is_training=True):
hidden = fc_block(inp, weights['w1'], weights['b1'], prob, reuse, scope+'0', is_training=is_training)
hidden = fc_block(hidden, weights['w2'], weights['b2'], prob, reuse, scope+'1', is_training=is_training)
hidden = fc_block(hidden, weights['w5'], weights['b5'], prob, reuse, scope+'3', is_training=is_training)
hidden = fc_block(hidden, weights['w3'], weights['b3'], prob, reuse, scope+'2', is_training=is_training)
return tf.matmul(hidden, weights['w4'])
    def construct_model(self, input_var, target_var, prob, decay_lr, is_training):
        """Build the training graph: multi-hot encode the input, run the
        fc network, and attach an MSE loss with a variance-based
        regulariser plus an Adam update op."""
        # NOTE(review): batch_size is computed but never used.
        batch_size = tf.shape(input_var)[0]
        # Multi-hot encoding: one-hot each index then sum over the
        # sequence axis, producing a bag-of-items vector of input_dim.
        input_var = tf.one_hot(input_var, self.input_dim, axis=-1)
        input_var = tf.reduce_sum(input_var, axis=1)
        self.input_var = input_var
        with tf.variable_scope(self.scope, reuse=None) as training_scope:
            # Reuse variables if they were already built on this instance.
            if 'weights' in dir(self):
                training_scope.reuse_variables()
                weights = self.weights
            else:
                self.weights = weights = self.construct_weights()
            # One throwaway forward pass on first build — presumably to
            # create the fc_block variables before the reuse=True pass.
            if not 'check' in dir(self):
                self.check=1
                self.forward(input_var, weights, prob, False)
            output = self.forward(input_var, weights, prob, True, is_training=is_training)
            self.output = tf.nn.softmax(output, axis=1)
            # Batch variance of the second logit; used below as an
            # anti-collapse regulariser (penalises low output variance).
            _, regularizer1 = tf.nn.moments(tf.expand_dims(output[:,1], axis=1), axes=0)
            self.loss = tf.reduce_mean(tf.losses.mean_squared_error(predictions=output, labels=target_var))
            if self.flag == 0:
                self.op = tf.train.AdamOptimizer(decay_lr).minimize(self.loss + 0.5 - 0.5*tf.log(regularizer1+1e-1), var_list=weights)
            else:
                # NOTE(review): weights['w4','w3','b3'] indexes the dict
                # with a tuple key and will raise KeyError at runtime;
                # presumably [weights['w4'], weights['w3'], weights['b3']]
                # was intended.
                self.op = tf.train.AdamOptimizer(decay_lr).minimize(self.loss + 0.5 - 0.5*tf.log(regularizer1+1e-1), var_list=weights['w4','w3','b3'])