hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
5b54b2dda0833770af7c0575bf7b026b9ce83b41 | 5,628 | py | Python | Cinema 4D/Clean.py | FitchOpenSource/C4D-To-Unity | a7b976d552ff4f44df473240623409b5768ea322 | [
"MIT"
] | 2 | 2017-09-06T13:19:35.000Z | 2017-11-17T18:42:08.000Z | Cinema 4D/Clean.py | FitchOpenSource/C4D-To-Unity | a7b976d552ff4f44df473240623409b5768ea322 | [
"MIT"
] | null | null | null | Cinema 4D/Clean.py | FitchOpenSource/C4D-To-Unity | a7b976d552ff4f44df473240623409b5768ea322 | [
"MIT"
] | null | null | null | import c4d
from c4d import gui, documents
#Welcome to the world of Python
def findNameMaterial(string, materials, cnt):
    """Return a unique material name derived from *string*.

    Appends "_<n>" with the smallest n > cnt such that the result does
    not already appear in *materials*.

    :param string: base material name (without numeric suffix)
    :param materials: list of names already taken
    :param cnt: last suffix value tried (pass 0 on the first call)
    :return: a name not contained in *materials*
    """
    cnt = cnt + 1
    candidate = string + "_" + str(cnt)
    if materials.count(candidate) == 0:
        return candidate
    # BUG FIX: recurse with the original base name rather than the
    # already-suffixed candidate, so a collision produces "name_2"
    # instead of "name_1_2" (same pattern as the disabled
    # findNameObject helper below).
    return findNameMaterial(string, materials, cnt)
#Create a unique name for the materials and remove unorthodox characters
def cleanMaterials():
    """Give every material in the active document a unique, Unity-safe name.

    Strips '.' and '*' characters and resolves duplicates by appending a
    numeric suffix via findNameMaterial().  Relies on the C4D-injected
    ``doc`` global.
    """
    seen_names = []
    for mat in doc.GetMaterials():
        # Remove characters Unity's importer chokes on.
        name = mat.GetName().replace(".", "").replace("*", "")
        if seen_names.count(name) == 0:
            seen_names.append(name)
        else:
            # Duplicate: derive a suffixed, unique variant.
            name = findNameMaterial(name, seen_names, 0)
            seen_names.append(name)
        mat.SetName(name)
    c4d.documents.SetActiveDocument(doc)
#def findNameObject(string, objects, cnt):
#
#cnt = cnt + 1
#tmp = string;
#string = string + "_" + str(cnt)
#if objects.count(string) == 0:
#return string
#else:
#string = findNameObject(tmp,objects,cnt)
#return string
def iterateChildren(obj, objects):
    """Depth-first walk over *obj* and its descendants, cleaning tags on each.

    :param obj: C4D object to process
    :param objects: kept for interface compatibility with the disabled
        cleanObjects pass; not read here
    """
    CleanTags(obj)
    for node in obj.GetChildren():
        iterateChildren(node, objects)
def CleanTags(obj):
    """
    Normalise the tags on *obj* for a clean FBX export to Unity:
    make the object editable, drop duplicate or material-less texture
    tags, lock/rebuild UVW tags, and move the remaining texture tag to
    the front of the tag list.  Relies on the C4D-injected ``doc``.
    """
    doc.SetActiveObject(obj)
    lists = []
    lists.append(obj)
    listTags = obj.GetTags()
    listMultipleTextureTags = []
    # Make current Object Editable
    c4d.CallCommand(12236)
    c4d.documents.SetActiveDocument(doc)
    # Null Object are ignored
    if obj.GetType() == c4d.Onull:
        return
    ####################
    #Remove Duplicated texture tags (keeps Polygon Selection)
    hasUVWLock = False
    for t in listTags:
        # Unrestricted texture tags carrying a material are candidates
        # for de-duplication; tags without a material are useless.
        if type(t) == c4d.TextureTag and t.GetMaterial() is not None:
            selection = t[c4d.TEXTURETAG_RESTRICTION]
            if selection == "":
                listMultipleTextureTags.append(t)
        if type(t) == c4d.TextureTag and t.GetMaterial() is None:
            t.Remove()
    increment = 0
    tag = None
    # Keep only the LAST unrestricted texture tag; remove the rest.
    for tTags in listMultipleTextureTags:
        selection = listMultipleTextureTags[increment][c4d.TEXTURETAG_RESTRICTION]
        if len(listMultipleTextureTags) != (increment + 1):
            listMultipleTextureTags[increment].Remove()
        tag = listMultipleTextureTags[increment]
        increment = increment + 1
    ####################
    #if uvw tag is locked(UVWTAG_LOCK = true) then we don't erase it
    UVWtag = obj.GetTag(c4d.Tuvw)
    # NOTE(review): projection value 6 is treated as "UVW mapping" here —
    # assumed from usage; confirm against the C4D SDK constants.
    if UVWtag is not None and (tag is None or tag[c4d.TEXTURETAG_PROJECTION] == 6):
        obj.GetTag(c4d.Tuvw)[c4d.UVWTAG_LOCK] = True
    c4d.EventAdd()
    listTags = obj.GetTags()
    # Drop unlocked UVW tags; remember whether a locked one survives.
    for t in listTags:
        if type(t) == c4d.UVWTag:
            if t[c4d.UVWTAG_LOCK] == False:
                t.Remove()
            else:
                hasUVWLock = True
    # Generate 2 UVW tags one for texture and second for lighting
    if tag is None or tag[c4d.TEXTURETAG_PROJECTION] == 6 or UVWtag is None:
        doc.SetActiveObject(obj)
        if hasUVWLock == False:
            # Tags menu, UVW tags -> set from projection command
            c4d.CallCommand(1030000, 1030000)
            doc.SetActiveObject(obj)
            if obj.GetTag(c4d.Tuvw) is not None:
                obj.GetTag(c4d.Tuvw)[c4d.UVWTAG_LOCK] = True
            doc.SetActiveObject(obj)
            # Tags menu, UVW tags -> set from projection command
            c4d.CallCommand(1030000, 1030000)
    else:
        doc.SetActiveTag(tag)
        # Tags menu, Generate UVW cordinates (this is in case texture tags projection is not set to UVW mapping )
        c4d.CallCommand(12235, 12235)
        tag[c4d.TEXTURETAG_TILE]=True
        obj.GetTag(c4d.Tuvw)[c4d.UVWTAG_LOCK] = True
        doc.SetActiveObject(obj)
        # Tags menu, UVW tags -> set from projection command
        c4d.CallCommand(1030000, 1030000)
    c4d.documents.SetActiveDocument(doc)
    Ttag = obj.GetTag(c4d.Ttexture)
    if Ttag is not None:
        # Re-insert the texture tag at the head of the tag list.
        obj.InsertTag(Ttag, None)
# Create a Unique name for the material (unity needs a unique name for materials)
#def cleanObjects(x,objects):
#string = x.GetName()
#if string.find(".") != -1:
#string = string.replace(".", "")
#if objects.count(string) == 0:
#objects.append(string)
#else:
#string = findNameObject(string, objects,0)
#objects.append(string)
#x.SetName(string)
#c4d.documents.SetActiveDocument(doc)
#Remove Invisble Objects
def iterateChildrenInvisble(obj):
    """Recursively delete objects that are hidden in both editor and render view."""
    removeInvisbleObjects(obj)
    for node in obj.GetChildren():
        iterateChildrenInvisble(node)
def removeInvisbleObjects(obj):
    """Delete *obj* when it is hidden in BOTH the editor and the renderer.

    In C4D a visibility mode of 1 means "off"; an object switched off in
    both views contributes nothing to the export, so it is removed.
    """
    hidden_everywhere = obj.GetEditorMode() == 1 and obj.GetRenderMode() == 1
    if hidden_everywhere:
        obj.Remove()
####################
def main():
    """Prepare the active Cinema 4D document for export to Unity."""
    # Pass 1: drop every object invisible in both editor and render view.
    for root in doc.GetObjects():
        iterateChildrenInvisble(root)
    # Pass 2: unique, Unity-safe material names.
    cleanMaterials()
    # Pass 3: normalise tags (UVW tags for texture + lighting, remove
    # useless texture tags) on every remaining object.
    tracked = []
    for root in doc.GetObjects():
        iterateChildren(root, tracked)
    print("Done")
if __name__=='__main__':
    main()
| 28.14 | 113 | 0.598792 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,576 | 0.280028 |
5b56109d5d5abec15a2278c520d6306048bb083e | 270 | py | Python | templates/flask/flaskRest/app/configs/config.py | david-osas/create-basic-app | 860fc579672855093ad8426fb01d010de4c7cff8 | [
"MIT"
] | 2 | 2020-12-01T11:33:36.000Z | 2020-12-01T12:25:49.000Z | templates/flask/flaskRest/app/configs/config.py | david-osas/create-basic-app | 860fc579672855093ad8426fb01d010de4c7cff8 | [
"MIT"
] | 2 | 2020-11-25T14:38:57.000Z | 2020-11-25T22:55:25.000Z | templates/flask/flaskRest/app/configs/config.py | david-osas/create-basic-app | 860fc579672855093ad8426fb01d010de4c7cff8 | [
"MIT"
] | 2 | 2020-11-26T08:59:50.000Z | 2021-03-30T20:01:06.000Z | from .development_config import DevelopmentConfig
from .production_config import ProductionConfig
from .testing_config import TestingConfig
# Maps a deployment-environment name (e.g. the value of FLASK_ENV) to the
# configuration class the app factory should load.
ENVIRONMENT_MAPPING = {
    "production": ProductionConfig,
    "development": DevelopmentConfig,
    "testing": TestingConfig
}
| 27 | 49 | 0.807407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.125926 |
5b561f3a8cc1973bf3b5ae98192b62df11c2ad7d | 743 | py | Python | LeetCode/Python/160.intersection-of-two-linked-lists.py | Alfonsxh/LeetCode-Challenge-python | e93f93fd58d1945708d6aa300dcbcd17d0708274 | [
"MIT"
] | null | null | null | LeetCode/Python/160.intersection-of-two-linked-lists.py | Alfonsxh/LeetCode-Challenge-python | e93f93fd58d1945708d6aa300dcbcd17d0708274 | [
"MIT"
] | null | null | null | LeetCode/Python/160.intersection-of-two-linked-lists.py | Alfonsxh/LeetCode-Challenge-python | e93f93fd58d1945708d6aa300dcbcd17d0708274 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=160 lang=python
#
# [160] Intersection of Two Linked Lists
#
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# 参考:https://leetcode-cn.com/problems/intersection-of-two-linked-lists/solution/tu-jie-xiang-jiao-lian-biao-by-user7208t/
class Solution(object):
    """LeetCode 160: find the node where two singly linked lists intersect."""

    def getIntersectionNode(self, headA, headB):
        """
        :type headA, headB: ListNode
        :rtype: ListNode (the first shared node, or None when disjoint)
        """
        # Two-pointer trick: each cursor walks its own list, then restarts
        # on the other list's head.  Both travel lenA + lenB steps in
        # total, so they meet at the first shared node — or both reach
        # None simultaneously when the lists do not intersect.
        walker_a = headA
        walker_b = headB
        while walker_a != walker_b:
            walker_a = headB if walker_a is None else walker_a.next
            walker_b = headA if walker_b is None else walker_b.next
        return walker_a
| 27.518519 | 121 | 0.625841 | 391 | 0.522029 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.562083 |
5b593df1d532c6d0fa3c3a20d72a0f9c45807594 | 3,475 | py | Python | test/client/test_utils.py | DobromirM/swim-system-python | a5b4f05457f1eb2739a920c42dfc721c83a1226a | [
"Apache-2.0"
] | 8 | 2019-11-11T19:38:59.000Z | 2022-01-06T11:13:04.000Z | test/client/test_utils.py | swimos/swim-system-python | 727c09b6e7300b063e320364373ff724d9b8af90 | [
"Apache-2.0"
] | 40 | 2019-10-29T10:35:49.000Z | 2021-05-14T22:18:35.000Z | test/client/test_utils.py | DobromirM/swim-system-python | a5b4f05457f1eb2739a920c42dfc721c83a1226a | [
"Apache-2.0"
] | 3 | 2020-01-31T18:28:58.000Z | 2021-08-25T08:53:13.000Z | # Copyright 2015-2021 SWIM.AI inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from urllib.parse import urlparse
from swimai.client._utils import _URI
class TestUtils(unittest.TestCase):
    """Unit tests for the _URI helper: scheme normalisation and URI parsing."""

    def _check_scheme(self, uri, expected):
        # Helper: normalise the scheme of *uri* and compare against
        # *expected* (None means the scheme is rejected).
        actual = _URI._normalise_scheme(urlparse(uri))
        self.assertEqual(expected, actual)

    def _check_parse(self, uri, expected):
        # Helper: parse *uri* and compare the (normalised_uri, scheme) pair.
        self.assertEqual(expected, _URI._parse_uri(uri))

    def test_normalise_scheme_valid_ws(self):
        self._check_scheme('ws://foo_bar:9000', 'ws')

    def test_normalise_scheme_valid_warp(self):
        # "warp" is an alias that normalises to plain websocket.
        self._check_scheme('warp://foo_bar:9000', 'ws')

    def test_normalise_scheme_valid_wss(self):
        self._check_scheme('wss://foo_bar:9000', 'wss')

    def test_normalise_scheme_valid_warps(self):
        # "warps" is an alias that normalises to secure websocket.
        self._check_scheme('warps://foo_bar:9000', 'wss')

    def test_normalise_scheme_invalid_empty(self):
        # No scheme at all is rejected.
        self._check_scheme('foo_bar:9000', None)

    def test_normalise_scheme_invalid_http(self):
        # Non-websocket schemes are rejected.
        self._check_scheme('http://foo_bar:9000', None)

    def test_parse_ws_uri(self):
        self._check_parse('ws://foo_bar:9000', ('ws://foo_bar:9000', 'ws'))

    def test_parse_warp_uri(self):
        self._check_parse('warp://foo_bar:9000', ('ws://foo_bar:9000', 'ws'))

    def test_parse_wss_uri(self):
        self._check_parse('wss://foo_bar:9000', ('wss://foo_bar:9000', 'wss'))

    def test_parse_warps_uri(self):
        self._check_parse('warps://foo_bar:9000', ('wss://foo_bar:9000', 'wss'))

    def test_parse_invalid_scheme_uri(self):
        # An unknown scheme must raise TypeError with a descriptive message.
        with self.assertRaises(TypeError) as error:
            _URI._parse_uri('carp://foo_bar:9000')
        message = error.exception.args[0]
        self.assertEqual('Invalid scheme "carp" for Warp URI!', message)
5b5975a909c1137119120f7fc6fd7242b0e2d766 | 656 | py | Python | regtests/list/slice.py | ahakingdom/Rusthon | 5b6b78111b62281cd1381e53362c5d4b520ade30 | [
"BSD-3-Clause"
] | 622 | 2015-01-01T14:53:51.000Z | 2022-03-27T14:52:25.000Z | regtests/list/slice.py | ahakingdom/Rusthon | 5b6b78111b62281cd1381e53362c5d4b520ade30 | [
"BSD-3-Clause"
] | 74 | 2015-01-05T01:24:09.000Z | 2021-04-26T00:06:38.000Z | regtests/list/slice.py | ahakingdom/Rusthon | 5b6b78111b62281cd1381e53362c5d4b520ade30 | [
"BSD-3-Clause"
] | 67 | 2015-01-18T22:54:54.000Z | 2022-03-01T12:54:23.000Z | from runtime import *
"""list slice"""
class XXX:
	"""Tiny fixture class exercised by the slice regression test."""
	def __init__(self):
		# Ten consecutive integers starting at zero.
		self.v = range(10)
	def method(self, a):
		# Identity: returns its argument unchanged.
		return a
def main():
	# NOTE: Python-2 style regression script for the Rusthon transpiler;
	# the bare `print` statements are py2 syntax on purpose.
	# Negative stop index: first five elements.
	a = range(10)[:-5]
	assert( len(a)==5 )
	assert( a[4]==4 )
	print '--------'
	# Step slicing: every other element.
	b = range(10)[::2]
	print b
	assert( len(b)==5 )
	assert( b[0]==0 )
	assert( b[1]==2 )
	assert( b[2]==4 )
	assert( b[3]==6 )
	assert( b[4]==8 )
	#if BACKEND=='DART':
	#	print(b[...])
	#else:
	#	print(b)
	# Open-ended slice with a computed start index.
	c = range(20)
	d = c[ len(b) : ]
	#if BACKEND=='DART':
	#	print(d[...])
	#else:
	#	print(d)
	assert( len(d)==15 )
	# Slicing an attribute and passing a slice through a method call.
	x = XXX()
	e = x.v[ len(b) : ]
	assert( len(e)==5 )
	f = x.method( x.v[len(b):] )
	assert( len(f)==5 )
main()
| 13.387755 | 29 | 0.501524 | 85 | 0.129573 | 0 | 0 | 0 | 0 | 0 | 0 | 128 | 0.195122 |
5b5a42e3e8c1b9664b17bcd555310e3c00b19953 | 4,661 | py | Python | src/test/test.py | ntalabot/base_dl_project_struct | 2a8b52081baf678fec4b74b16f41dd22a3d0eb21 | [
"MIT"
] | null | null | null | src/test/test.py | ntalabot/base_dl_project_struct | 2a8b52081baf678fec4b74b16f41dd22a3d0eb21 | [
"MIT"
] | null | null | null | src/test/test.py | ntalabot/base_dl_project_struct | 2a8b52081baf678fec4b74b16f41dd22a3d0eb21 | [
"MIT"
] | null | null | null | """
Module for testing models (evaluation, predictions).
"""
import numpy as np
import matplotlib.pyplot as plt
import torch
import torchvision
def predict_dataloader(model, dataloader, discard_target=True):
    """
    Run *model* over every batch of *dataloader* and collect the outputs.

    Parameters
    ----------
    model : pytorch model
        Model used for inference; eval mode is set and gradients disabled.
    dataloader : pytorch dataloader
        Yields input batches.  Set shuffle=False when prediction order
        matters.
    discard_target : bool (default = True)
        When True, each batch is assumed to be (inputs, targets, ...) and
        only the first element is fed to the model.  Set to False when
        the dataloader yields bare input tensors.

    Returns
    -------
    list of tensors
        One prediction tensor per batch, in iteration order.
    """
    device = next(model.parameters()).device
    model.eval()
    outputs = []
    with torch.no_grad():
        for batch in dataloader:
            inputs = batch[0] if discard_target else batch
            outputs.append(model(inputs.to(device)))
    return outputs
def predict(model, inputs, batch_size=None):
    """
    Output predictions for the given inputs and model.

    Parameters
    ----------
    model : pytorch model
        The pytorch model to predict with.
    inputs : tensor
        Input tensors; the first dimension is the sample dimension.
    batch_size : int (optional)
        Number of samples sent to the network at once.  Useful if inputs
        is too large for the GPU.  If not given, inputs is sent whole.

    Returns
    -------
    predictions : tensor
        Tensor of predictions, on the same device as `inputs`.
    """
    model_device = next(model.parameters()).device
    model.eval()
    with torch.no_grad():
        if batch_size is None or batch_size <= 0:
            # Guard against batch_size=0 (previously a ZeroDivisionError).
            batch_size = max(len(inputs), 1)
        predictions = []
        for start in range(0, len(inputs), batch_size):
            batch = inputs[start:start + batch_size].to(model_device)
            predictions.append(model(batch).to(inputs.device))
        if not predictions:
            # BUG FIX: empty inputs used to crash in torch.cat([]).
            # A zero-sample forward pass yields an empty tensor with the
            # model's output shape.
            return model(inputs.to(model_device)).to(inputs.device)
        return torch.cat(predictions)
def evaluate_dataloader(model, dataloader, metrics):
    """
    Compute dataset-averaged metric values for *model* over *dataloader*.

    Parameters
    ----------
    model : pytorch model
        The pytorch model to evaluate.
    dataloader : pytorch dataloader
        Yields (inputs, targets, ...) batches.
    metrics : dict
        Maps metric name (str) to a callable(predictions, targets)
        returning a scalar tensor (assumed to be a batch mean).

    Returns
    -------
    dict
        Metric name -> value averaged over the whole dataset.
    """
    device = next(model.parameters()).device
    totals = {name: 0 for name in metrics}
    model.eval()
    with torch.no_grad():
        for batch in dataloader:
            inputs = batch[0].to(device)
            targets = batch[1].to(device)
            preds = model(inputs)
            n = inputs.shape[0]
            for name, fn in metrics.items():
                # Weight each batch mean by the batch size so the final
                # division yields a proper per-sample average.
                totals[name] += fn(preds, targets).item() * n
    return {name: total / len(dataloader.dataset)
            for name, total in totals.items()}
def evaluate(model, inputs, targets, metrics, batch_size=None):
    """
    Compute metric values for *model* on in-memory inputs and targets.

    Parameters
    ----------
    model : pytorch model
        The pytorch model to evaluate.
    inputs : tensor
        Input tensors.
    targets : tensor
        Target tensors.
    metrics : dict
        Maps metric name (str) to a callable(predictions, targets).
    batch_size : int (optional)
        Forwarded to predict(); batches the forward pass when the inputs
        do not fit on the device at once.

    Returns
    -------
    dict
        Metric name -> scalar metric value.
    """
    # Single forward pass, then evaluate every metric on the result.
    preds = predict(model, inputs, batch_size=batch_size)
    return {name: fn(preds, targets).item() for name, fn in metrics.items()}
5b5b10607266561433613c17a985d903f65eb540 | 524 | py | Python | forums/migrations/0007_auto_20191203_0820.py | phiratio/django-forums-app | a8d50b436bc34f74ab8c58234f5f7cf5175e00c5 | [
"MIT"
] | 22 | 2019-10-14T20:57:18.000Z | 2022-01-13T11:32:16.000Z | forums/migrations/0007_auto_20191203_0820.py | phiratio/django-forums-app | a8d50b436bc34f74ab8c58234f5f7cf5175e00c5 | [
"MIT"
] | 22 | 2019-10-16T12:21:59.000Z | 2021-12-16T14:05:46.000Z | forums/migrations/0007_auto_20191203_0820.py | phiratio/django-forums-app | a8d50b436bc34f74ab8c58234f5f7cf5175e00c5 | [
"MIT"
] | 10 | 2019-10-15T19:55:30.000Z | 2022-02-27T13:53:55.000Z | # Generated by Django 2.2.5 on 2019-12-03 08:20
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: re-point Post.thread at forums.Thread with
    # a reverse accessor named "posts" and cascade deletion.

    dependencies = [
        ('forums', '0006_auto_20191203_0758'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='thread',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='posts',
                                    to='forums.Thread'),
        ),
    ]
| 26.2 | 102 | 0.599237 | 398 | 0.759542 | 0 | 0 | 0 | 0 | 0 | 0 | 116 | 0.221374 |
5b5b2553a6c42f4fdcde3de08cf09dc44f4ebc02 | 4,078 | py | Python | hw4/generate.py | anthonywchen/uci-statnlp | 0cb98fecda11158601702f9fdb4f85a8011b5f5c | [
"Apache-2.0"
] | 16 | 2017-01-19T21:36:05.000Z | 2022-03-07T05:49:01.000Z | hw4/generate.py | anthonywchen/uci-statnlp | 0cb98fecda11158601702f9fdb4f85a8011b5f5c | [
"Apache-2.0"
] | null | null | null | hw4/generate.py | anthonywchen/uci-statnlp | 0cb98fecda11158601702f9fdb4f85a8011b5f5c | [
"Apache-2.0"
] | 82 | 2017-01-22T00:06:51.000Z | 2021-10-07T11:45:48.000Z | import argparse
import json
import random
import jsonlines
import tqdm
from transformers import BartTokenizer, BartForConditionalGeneration
import decoders
from models import TransformerModel
# Fix the RNG so sampling-based decoders produce reproducible summaries.
random.seed(0)
def generate_summary(model, tokenizer, document, decoder):
    """ Generates a summary for a single document.

    Parameters
    ----------
    model: ``BartForConditionalGeneration`` A BART model fine-tuned for
        summarization
    tokenizer: ``BartTokenizer`` The matching BART tokenizer
    document: ``str`` A single document to be summarized
    decoder: ``str`` One of 'greedy', 'beam_search', 'random', 'top_k',
        'nucleus'

    Returns:
    ----------
    summary: ``str`` A generated summary of the input document
    summary_score: ``float`` The log-probability score of the summary
    """
    input_ids = tokenizer(document, truncation=True, return_tensors='pt')['input_ids']
    wrapped_model = TransformerModel(model)

    # Arguments shared by every decoding strategy.
    shared_kwargs = dict(
        model=wrapped_model,
        max_length=50,
        eos_id=tokenizer.eos_token_id,
        decoded_ids=[tokenizer.bos_token_id],
        metadata={'input_ids': input_ids},
    )

    if decoder == 'greedy':
        best = decoders.greedy_decoding(**shared_kwargs)
    elif decoder == 'beam_search':
        # Beam search returns a ranked list; keep the top hypothesis.
        best = decoders.beam_search_decoding(beam_size=3, **shared_kwargs)[0]
    elif decoder == 'random':
        # Random sampling is top-K sampling with a very large K.
        best = decoders.top_k_sampling(top_k=int(1e9), temperature=1, **shared_kwargs)
    elif decoder == 'top_k':
        best = decoders.top_k_sampling(top_k=3, temperature=0.5, **shared_kwargs)
    elif decoder == 'nucleus':
        best = decoders.nucleus_sampling(top_p=0.2, **shared_kwargs)

    summary = tokenizer.decode(best.decoded_ids, skip_special_tokens=True)
    return summary, best.score
def main():
    """CLI entry point: summarize every document in a JSONL input file."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file")
    parser.add_argument("--output_file")
    parser.add_argument("--decoder",
                        choices=['greedy', 'beam_search', 'random', 'top_k', 'nucleus'])
    args = parser.parse_args()

    # Small distilled BART checkpoint fine-tuned on XSum summarization.
    model_name = 'sshleifer/distilbart-xsum-1-1'
    model = BartForConditionalGeneration.from_pretrained(model_name).eval()
    tokenizer = BartTokenizer.from_pretrained(model_name)

    # Summarize each document in the input file.
    results = []
    for record in tqdm.tqdm(jsonlines.open(args.input_file)):
        summary, score = generate_summary(model=model,
                                          tokenizer=tokenizer,
                                          document=record['document'],
                                          decoder=args.decoder)
        results.append({'id': record['id'],
                        'generated_summary': summary,
                        'generated_summary_score': score})

    # Write one JSON object per line.
    with open(args.output_file, 'w', encoding='utf-8') as f:
        for row in results:
            f.write(json.dumps(row, ensure_ascii=False) + '\n')


if __name__ == "__main__":
    main()
| 33.702479 | 88 | 0.61844 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 976 | 0.239333 |
5b5d7896ccaafcb748c653f5c939c542c31a8d1a | 1,048 | py | Python | xain/grpc/test_grpc.py | skade/xain | 4d9a84b64d98dd4229968755110c60e1c0fd51f7 | [
"Apache-2.0"
] | null | null | null | xain/grpc/test_grpc.py | skade/xain | 4d9a84b64d98dd4229968755110c60e1c0fd51f7 | [
"Apache-2.0"
] | null | null | null | xain/grpc/test_grpc.py | skade/xain | 4d9a84b64d98dd4229968755110c60e1c0fd51f7 | [
"Apache-2.0"
] | null | null | null | from concurrent import futures
import grpc
import numpy as np
import pytest
from numproto import ndarray_to_proto, proto_to_ndarray
from xain.grpc import hellonumproto_pb2, hellonumproto_pb2_grpc
from xain.grpc.numproto_server import NumProtoServer
@pytest.fixture
def greeter_server():
    """Spin up a single-worker gRPC NumProto server for a test, then
    shut it down once the test finishes."""
    grpc_server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    hellonumproto_pb2_grpc.add_NumProtoServerServicer_to_server(
        NumProtoServer(), grpc_server
    )
    grpc_server.add_insecure_port("localhost:50051")
    grpc_server.start()
    # Hand control to the test; teardown runs after it returns.
    yield
    grpc_server.stop(0)
# pylint: disable=W0613,W0621
@pytest.mark.integration
def test_greeter_server(greeter_server):
    """End-to-end check: the server must return the ndarray doubled."""
    with grpc.insecure_channel("localhost:50051") as channel:
        stub = hellonumproto_pb2_grpc.NumProtoServerStub(channel)
        payload = np.arange(10)
        reply = stub.SayHelloNumProto(
            hellonumproto_pb2.NumProtoRequest(arr=ndarray_to_proto(payload))
        )
        echoed = proto_to_ndarray(reply.arr)
        assert np.array_equal(payload * 2, echoed)
| 27.578947 | 72 | 0.753817 | 0 | 0 | 289 | 0.275763 | 761 | 0.726145 | 0 | 0 | 63 | 0.060115 |
5b5e91b7967d2987cec2827cf1d367005896b8b4 | 7,920 | py | Python | AffluenceCounter/app/tracker.py | dvalladaresv/AIVA_2021_Deteccion_de_actividad_grupo_F | 5966c83b3eaeab37b19f6f2a070cd906209df4cc | [
"MIT"
] | null | null | null | AffluenceCounter/app/tracker.py | dvalladaresv/AIVA_2021_Deteccion_de_actividad_grupo_F | 5966c83b3eaeab37b19f6f2a070cd906209df4cc | [
"MIT"
] | 4 | 2021-05-08T16:53:34.000Z | 2021-05-09T18:31:34.000Z | AffluenceCounter/app/tracker.py | dvalladaresv/AIVA_2021_Deteccion_de_actividad_grupo_F | 5966c83b3eaeab37b19f6f2a070cd906209df4cc | [
"MIT"
] | 1 | 2022-03-21T19:16:14.000Z | 2022-03-21T19:16:14.000Z | import cv2
# Factory functions for the OpenCV single-object trackers supported here,
# keyed by the short name passed to Track(tracker_name, ...).
OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "mil": cv2.TrackerMIL_create
}
class Track:
    """
    Tracks a single person with an OpenCV object tracker.

    Maintains the current bounding box and its centroid, an accumulated
    status string recording which reference regions the centroid has
    visited ("L" = left, "R" = right, "P" = door), and a stall counter
    used to detect lost/frozen tracks.
    """
    def __init__(self, tracker_name, first_frame, bbox, id, references):
        # :param tracker_name: key into OPENCV_OBJECT_TRACKERS ("csrt", ...)
        # :param first_frame: frame used to initialise the OpenCV tracker
        # :param bbox: initial (x, y, w, h) bounding box
        # :param id: identifier assigned by the owning Tracker
        # :param references: dict with "left"/"right"/"door" keys mapping
        #     to (min, max) pixel ranges (x ranges for left/right, y for door)
        self._tracker = OPENCV_OBJECT_TRACKERS[tracker_name]()
        self._bbox = bbox
        self._tracker.init(first_frame, bbox)
        self._frame_height, self._frame_width, _ = first_frame.shape
        self._id = id
        self._status = ""
        self._references = references
        # Current and previous centroid coordinates; the previous pair is
        # used by is_timeout() to detect a frozen track.
        self._x = 0
        self._y = 0
        self._x_last = 0
        self._y_last = 0
        self._timeout = 0
        self._update_centroid()
    def update(self, frame):
        """
        Advance the OpenCV tracker one frame and refresh the centroid.
        :param frame: image
        """
        success, self._bbox = self._tracker.update(frame)
        self._update_centroid()
    def _update_centroid(self):
        """
        Recompute the bounding-box centre, remembering the previous one.
        """
        self._x_last = self._x
        self._y_last = self._y
        self._x = int(self._bbox[0] + (self._bbox[2]) / 2)
        self._y = int(self._bbox[1] + (self._bbox[3]) / 2)
    def is_finish_track(self):
        """
        Return True when the track has (mostly) left the frame, i.e. less
        than 80% of the bounding-box area remains inside the image.
        """
        bb_area = self._bbox[2] * self._bbox[3]
        xmin = max(0, self._bbox[0])
        ymin = max(0, self._bbox[1])
        xmax = min(self._frame_width, self._bbox[0] + self._bbox[2])
        ymax = min(self._frame_height, self._bbox[1] + self._bbox[3])
        bb_inner_area = (xmax - xmin) * (ymax - ymin)
        try:
            percent_in_area = bb_inner_area / bb_area
        except ZeroDivisionError:
            # Degenerate (zero-area) box: treat the track as still alive.
            return False
        if percent_in_area < 0.8:
            return True
        return False
    def get_bbox(self):
        """
        :return: current (x, y, w, h) bounding box of this track
        """
        return self._bbox
    def update_bbox(self, bbox):
        """
        Replace the bounding box and refresh the centroid.
        :param bbox: new (x, y, w, h) box
        """
        self._bbox = bbox
        self._update_centroid()
    def get_id(self):
        """
        :return: identifier of this track
        """
        return self._id
    def check_bb_size(self):
        """
        Check whether the bounding box has a plausible size: boxes wider
        or taller than a third of the frame are rejected.
        :return: boolean
        """
        if (self._bbox[2] > self._frame_width / 3) or (self._bbox[3] > self._frame_height / 3):
            return False
        return True
    def get_status(self):
        """
        :return: accumulated status string (letters "L", "R", "P")
        """
        return self._status
    def update_status(self):
        """
        Append, at most once each, the region letters for the reference
        zones the centroid currently lies in.
        """
        self._ref_left()
        self._ref_right()
        self._ref_door()
    def _ref_left(self):
        """
        Append "L" if the centroid is inside the left reference x-range.
        """
        left_ref = self._references["left"]
        if left_ref[0] < self._x < left_ref[1] and not "L" in self._status:
            self._status = self._status + "L"
    def _ref_right(self):
        """
        Append "R" if the centroid is inside the right reference x-range.
        """
        right_ref = self._references["right"]
        if right_ref[0] < self._x < right_ref[1] and not "R" in self._status:
            self._status = self._status + "R"
    def _ref_door(self):
        """
        Append "P" if the centroid is inside the door reference y-range.
        """
        door_ref = self._references["door"]
        if door_ref[0] < self._y < door_ref[1] and not "P" in self._status:
            self._status = self._status + "P"
    def is_timeout(self):
        """
        Return True once the centroid has been identical for 5
        consecutive updates (the tracker is considered stuck).
        """
        if self._x == self._x_last and self._y == self._y_last:
            self._timeout = self._timeout + 1
        else:
            self._timeout = 0
        if self._timeout >= 5:
            return True
        else:
            return False
class Tracker:
    """
    Owns all person Tracks: merges detector boxes into existing tracks,
    counts people entering the shop vs. passing by, and prunes finished
    or stalled tracks.
    """
    # Reference regions (pixel ranges) used to classify trajectories:
    # "left"/"right" are x ranges, "door" is a y range.
    references = {"left": (20, 120), "right": (320, 400), "door": (60, 120)}
    TRACKER_TYPE = "csrt"
    # Overlap/confidence thresholds for cv2.dnn.NMSBoxes matching.
    CONF_THRESHOLD = 0.82
    NMS_THRESHOLD = 0.1

    def __init__(self):
        self._trackers = []
        self._last_bboxes = None
        self._track_id = 0
        self.counter_enter = 0
        self.counter_pass = 0

    def refresh_bbox(self, bboxes, better_bb_index):
        """
        Blend two candidate boxes, weighting the better one.
        :param bboxes: sequence of exactly two (x, y, w, h) boxes
        :param better_bb_index: index (0 or 1) of the preferred box
        :return: tuple equal to 0.6 * better box + 0.4 * other box
        """
        import operator
        best = tuple(map(operator.mul, bboxes[better_bb_index], (.6, .6, .6, .6)))
        other = tuple(map(operator.mul, bboxes[int(not better_bb_index)], (.4, .4, .4, .4)))
        return tuple(map(operator.add, best, other))

    def update_trackers_by_dets(self, frame, bboxes):
        """
        Match detector boxes to existing tracks (refreshing their bbox)
        or spawn new tracks for unmatched detections.
        :param frame: image
        :param bboxes: boxes produced by the person detector
        """
        for bbox in bboxes:
            add_new = True
            for tr in self._trackers:
                bb = [bbox, tr.get_bbox()]
                indicates = cv2.dnn.NMSBoxes(bb, [1., .9], self.CONF_THRESHOLD, self.NMS_THRESHOLD)
                if indicates.size == 1:
                    # Detection overlaps this track: merge instead of adding.
                    add_new = False
                    new_bbox = self.refresh_bbox(bb, indicates[0][0])
                    tr.update_bbox(new_bbox)
            if add_new:
                new_track = Track("csrt", frame, bbox, self._track_id, references=self.references)
                # Only keep tracks fully in frame and of plausible size.
                if not new_track.is_finish_track() and new_track.check_bb_size():
                    self._trackers.append(new_track)
                    self._track_id += 1

    def get_counter_pass(self):
        """
        :return: number of people that walked past without entering
        """
        return self.counter_pass

    def get_counter_enter(self):
        """
        :return: number of people that entered the shop
        """
        return self.counter_enter

    def check_trackers(self):
        """
        Inspect completed trajectories and update the counters:
        "LR"/"RL" means the person crossed in front (pass-by);
        "LP"/"RP" means they reached the door region (entered).
        """
        # BUG FIX: iterate over a copy — remove_track() mutates
        # self._trackers, and removing while iterating skipped every
        # element following a removed one (miscounting people).
        for tr in list(self._trackers):
            status = tr.get_status()
            if len(status) >= 2:
                if status == "LR" or status == "RL":
                    self.counter_pass = self.counter_pass + 1
                elif status == "LP" or status == "RP":
                    self.counter_enter = self.counter_enter + 1
                self.remove_track(tr)

    def remove_track(self, tr):
        """
        Remove a track from the tracker list.
        :param tr: track to remove
        """
        index = self._trackers.index(tr)
        self._trackers.pop(index)

    def track(self, frame):
        """
        Advance every track one frame: update positions and statuses,
        drop stalled (timed-out) tracks, then drop finished ones.
        :param frame: image
        :return: the frame (unmodified), for chaining convenience
        """
        # BUG FIX: iterate over a copy so remove_track() cannot disturb
        # the iteration (same skipping bug as in check_trackers).
        for track in list(self._trackers):
            track.update(frame)
            track.update_status()
            if track.is_timeout():
                self.remove_track(track)
        # Keep only tracks still inside the frame.
        self._trackers = [tr for tr in self._trackers if not tr.is_finish_track()]
        return frame
| 29.886792 | 100 | 0.529419 | 7,763 | 0.979435 | 0 | 0 | 0 | 0 | 0 | 0 | 2,258 | 0.284885 |
5b5eb638f3c45dd8372789c8fa5a6b4ca5ba99fc | 738 | py | Python | app/middlewares/apikey_auth.py | meongbego/IOT_ADRINI | 0923b86a9d1da5d6859b70726ad1e041aecc97b2 | [
"MIT"
] | 1 | 2019-07-27T12:17:23.000Z | 2019-07-27T12:17:23.000Z | app/middlewares/apikey_auth.py | meongbego/ADRINI_IOT_PLATFORM | 0923b86a9d1da5d6859b70726ad1e041aecc97b2 | [
"MIT"
] | 4 | 2021-04-18T11:41:31.000Z | 2021-06-01T23:12:19.000Z | app/middlewares/apikey_auth.py | sofyan48/ADRINI_IOT_PLATFORM | 0923b86a9d1da5d6859b70726ad1e041aecc97b2 | [
"MIT"
] | null | null | null | from functools import wraps
from app.helpers.rest import *
from app import redis_store
from flask import request
from app.models import model as db
import hashlib
def apikey_required(f):
    """Flask view decorator enforcing a valid ``apikey`` request header.

    The header value must match a ``channels_key`` row in the
    ``tb_channels`` table; otherwise a 400 response is returned.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        # Missing header: reject immediately.
        if 'apikey' not in request.headers:
            return response(400, message=" Invalid access apikey ")
        # Look the supplied key up among the registered channel keys.
        access_token = db.get_by_id(
            table="tb_channels",
            field="channels_key",
            value=request.headers['apikey']
        )
        if not access_token:
            return response(400, message=" Invalid access apikey ")
        return f(*args, **kwargs)
    return decorated_function
5b60c8c38a51a827116df702a80c66bfa7ab4576 | 4,204 | py | Python | tests/recog_tests.py | anthonys01/snip2fumen | 43f5cda3cffd34108082b6653216df9a0480025a | [
"MIT"
] | null | null | null | tests/recog_tests.py | anthonys01/snip2fumen | 43f5cda3cffd34108082b6653216df9a0480025a | [
"MIT"
] | 2 | 2022-02-12T07:48:15.000Z | 2022-02-18T21:50:49.000Z | tests/recog_tests.py | anthonys01/snip2fumen | 43f5cda3cffd34108082b6653216df9a0480025a | [
"MIT"
] | null | null | null | """
Test image to fumen conversion
"""
import unittest
from snip2fumen.recog import BoardRecognizer, FumenEncoder
class RegogTests(unittest.TestCase):
    """
    Regression tests: each case feeds a board screenshot to BoardRecognizer
    and checks the exact fumen URL produced by FumenEncoder.
    """

    def _assert_fumen(self, image_path, expected_fumen):
        """Recognize *image_path* with a fresh BoardRecognizer and assert the
        encoded fumen URL equals *expected_fumen* (shared by every test)."""
        board_recog = BoardRecognizer()
        grid = board_recog.recognize_file(image_path)
        self.assertEqual(FumenEncoder.to_fumen(grid), expected_fumen)

    def test_jstris1(self):
        """Test jstris 1"""
        self._assert_fumen(
            "./img/jstris1.png",
            "http://fumen.zui.jp/?v115@teR4GeR4PfwwBeRpDexwTpBei0wwRpilBewhg0R4At?"
            "glBtBeglxhRpglAtCehlwhRpR4Aei0Btwwg0glQ4AeBtg0Q?"
            "4ywglCeBtR4RphlAezhQ4RpJeAgH"
        )

    def test_jstris2(self):
        """Test jstris 2"""
        self._assert_fumen(
            "./img/jstris2.png",
            "http://fumen.zui.jp/?v115@1eRpHeRp5ewwHeywGeg0DeRpCei0BeRpCeAtEeglQ4?"
            "AeBtwhAeQ4ilR4BtwhAeR4RpxwBtwhglg0Q4AeglywAtwhR?"
            "4Beg0RpBtR4Ceg0RpAtzhAeh0JeAgH"
        )

    def test_jstris3(self):
        """Test jstris 3"""
        self._assert_fumen(
            "./img/jstris3.png",
            "http://fumen.zui.jp/?v115@4ewwIexwHewwfehlHewhglAeR4EewhglR4BtDewhRp?"
            "DtBeg0whRpglBtCeg0wwjlRpAeh0xwRpglRpAeg0whwwQ4R?"
            "phlwwAeg0whH8AeI8AeI8AeI8AeA8JeAgH"
        )

    def test_fourtris1(self):
        """Test fourtris 1"""
        self._assert_fumen(
            "./img/fourtris1.png",
            "http://fumen.zui.jp/?v115@zeAtGeglBtGeglAtFeQ4AeklCeR4BtglRpBehlR4Bt?"
            "RpCeglg0R4g0zhAeglg0whQ4i0AtBeh0whywBtBeRpwhQ4x?"
            "wAtCeRphlQ4wwR4Aeg0BtwhglRpBtAeg0AtxhglRpAeBtR4?"
            "xhg0wwBeS4glxhg0xwAtR4AeglwhglRpAtxwAei0F8AeI8A?eC8JeAgH"
        )

    def test_fourtris2(self):
        """Test fourtris 2"""
        self._assert_fumen(
            "./img/fourtris2.png",
            "http://fumen.zui.jp/?v115@cfg0Iei0DeRpCeglDeRpAeilEeAtCeh0AewwAeBtCe?"
            "g0AeywAtDeg0BeR4whDeQ4AeR4AewhDeR4Aeh0whEeQ4Aeg?"
            "0AewhwwDeglAeg0AeywAeilBtAeRpBewwCeBtRpAeywzhJe?AgH"
        )

    def test_tetrio1(self):
        """Test tetrio 1"""
        self._assert_fumen(
            "./img/tetrio1.png",
            "http://fumen.zui.jp/?v115@SfilRpEeglBtRpwwBeR4xhBtywRpAexhi0BtRpAegl?"
            "zhAtwwBtAeglAei0xwAeBthlRpg0Q4wwBeglh0RpAtR4Beg?"
            "lg0RpAtywilAezhR4Beg0DeR4Cei0BeBtEeR4BeBtRpAeR4?CewwAeRpAezhywJeAgH"
        )

    def test_tetrio2(self):
        """Test tetrio 2"""
        self._assert_fumen(
            "./img/tetrio2.png",
            "http://fumen.zui.jp/?v115@ZfwhIewhCeRpDewhCeRpBtBewhCeywBtAeBtCewwil?"
            "AewhBtAeili0whQ4Ceglh0Q4g0whR4Aewwglg0T4i0wwwhR?"
            "pQ4AeglywBtRpwwAehlwwzhxwAeG8AeH8AeC8AeI8JeAgH"
        )

    def test_small_sc1(self):
        """Test small board 1"""
        self._assert_fumen(
            "./img/smallsc1.png",
            "http://fumen.zui.jp/?v115@FhAtg0GeBtg0EeR4Ath0DeR4zhJeAgH"
        )
# Allow running the test module directly (python recog_tests.py).
if __name__ == '__main__':
    unittest.main()
| 38.218182 | 96 | 0.578259 | 4,038 | 0.960514 | 0 | 0 | 0 | 0 | 0 | 0 | 1,818 | 0.432445 |
5b62f78362a43ef686c374acc27b359783b74d27 | 337 | py | Python | src/utility.py | wadinj/out_of_many_one | a6563a2b7bbcf66479c4cefe9bc6541d0cc5ba59 | [
"MIT"
] | null | null | null | src/utility.py | wadinj/out_of_many_one | a6563a2b7bbcf66479c4cefe9bc6541d0cc5ba59 | [
"MIT"
] | null | null | null | src/utility.py | wadinj/out_of_many_one | a6563a2b7bbcf66479c4cefe9bc6541d0cc5ba59 | [
"MIT"
] | null | null | null | """ General purpose functions """
import hashlib
LOGGING_FORMAT = '%(asctime)s %(levelname)s: %(message)s'
def hash_from_strings(items):
    """Return a SHA-256 hex digest of all str elements joined with '+|+'."""
    joined = '+|+'.join(items)
    return hashlib.sha256(joined.encode('utf-8')).hexdigest()
| 28.083333 | 73 | 0.691395 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.456973 |
5b64ae0e55cb234089cde25553e4f5c0de44b4d8 | 2,558 | py | Python | gqa/data/datasets/gqa.py | xiling42/VL-BERT | 4573b4e1e82b6c092d4830d0b88821e9ee1a81fb | [
"MIT"
] | null | null | null | gqa/data/datasets/gqa.py | xiling42/VL-BERT | 4573b4e1e82b6c092d4830d0b88821e9ee1a81fb | [
"MIT"
] | null | null | null | gqa/data/datasets/gqa.py | xiling42/VL-BERT | 4573b4e1e82b6c092d4830d0b88821e9ee1a81fb | [
"MIT"
] | null | null | null | import json
import os
import pickle
import numpy as np
from PIL import Image
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import h5py
from .transforms import Scale
img = None
img_info = {}
def gqa_feature_loader(root):
    """
    Lazily load the GQA visual features (HDF5) and the object-info index.

    Both handles are cached in the module globals ``img``/``img_info`` so
    that every dataset instance shares one open HDF5 file.

    :param root: dataset root directory containing a ``data/`` folder
    :return: (HDF5 'features' dataset handle, image-info dict)
    """
    global img, img_info
    if img is not None:
        # Already loaded by an earlier call; reuse the cached handles.
        return img, img_info
    # The HDF5 file is intentionally kept open: `img` is a lazy dataset
    # handle that reads from it on every __getitem__.
    h = h5py.File(root + '/data/gqa_features.hdf5', 'r')
    img = h['features']
    with open(root + '/data/gqa_objects_merged_info.json', 'r') as f:
        img_info = json.load(f)
    return img, img_info
class GQADataset(Dataset):
    """GQA question-answering dataset.

    Each item is ``(image_features, question_token_ids, question_length,
    answer_id)``; the image features come from the shared HDF5 file opened
    once by ``gqa_feature_loader``.
    """

    def __init__(self, root, split='train', transform=None):
        """
        :param root: dataset root directory (contains ``data/``)
        :param split: split name used to pick ``gqa_<split>.pkl``
        :param transform: accepted for API compatibility; not used here
        """
        with open(f'{root}/data/gqa_{split}.pkl', 'rb') as f:
            self.data = pickle.load(f)
        with open(f'{root}/data/gqa_dic.pkl', 'rb') as f:
            dic = pickle.load(f)
        # +1 leaves token id 0 free; collate_data uses 0 as padding.
        self.n_words = len(dic['word_dic']) + 1
        self.n_answers = len(dic['answer_dic'])
        self.root = root
        self.split = split
        self.img, self.img_info = gqa_feature_loader(self.root)

    def __getitem__(self, index):
        """Return (features tensor, question ids, question length, answer)."""
        imgfile, question, answer = self.data[index]
        idx = int(self.img_info[imgfile]['index'])
        img = torch.from_numpy(self.img[idx])
        return img, question, len(question), answer

    def __len__(self):
        return len(self.data)
# Augmentation pipeline: resize to 224x224 (Scale is the project-local
# transform imported from .transforms), pad by 4, random-crop back to
# 224x224, then convert to a tensor normalized to roughly [-1, 1].
transform = transforms.Compose([
    Scale([224, 224]),
    transforms.Pad(4),
    transforms.RandomCrop([224, 224]),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.5, 0.5, 0.5],
                         std=[0.5, 0.5, 0.5])
])
def collate_data(batch):
    """Collate ``(image, question, length, answer)`` samples into a batch.

    Samples are ordered by descending question length; questions are
    right-padded with zeros into one int64 matrix.

    :return: (stacked images, question id tensor, lengths list, answer tensor)
    """
    ordered = sorted(batch, key=lambda item: len(item[1]), reverse=True)
    longest = max(len(item[1]) for item in batch)
    question_ids = np.zeros((len(batch), longest), dtype=np.int64)
    images, lengths, answers = [], [], []
    for row, (image, question, _, answer) in enumerate(ordered):
        n_tokens = len(question)
        question_ids[row, :n_tokens] = question
        images.append(image)
        lengths.append(n_tokens)
        answers.append(answer)
    return (torch.stack(images), torch.from_numpy(question_ids),
            lengths, torch.LongTensor(answers))
5b661dbcb573ffb3020fc029bc865c3b7a199ad0 | 2,019 | py | Python | progressivis/core/changemanager_dict.py | jdfekete/progressivis | 3bc79ce229cd628ef0aa4663136a674743697b47 | [
"BSD-2-Clause"
] | 51 | 2015-09-14T16:31:02.000Z | 2022-01-12T17:56:53.000Z | progressivis/core/changemanager_dict.py | jdfekete/progressivis | 3bc79ce229cd628ef0aa4663136a674743697b47 | [
"BSD-2-Clause"
] | 10 | 2017-11-15T15:10:05.000Z | 2022-01-19T07:36:43.000Z | progressivis/core/changemanager_dict.py | jdfekete/progressivis | 3bc79ce229cd628ef0aa4663136a674743697b47 | [
"BSD-2-Clause"
] | 5 | 2017-11-14T20:20:56.000Z | 2020-01-22T06:26:51.000Z |
from .changemanager_base import BaseChangeManager
from ..utils.psdict import PsDict
from ..table.tablechanges import TableChanges
from .slot import Slot
import copy
class DictChangeManager(BaseChangeManager):
    """
    Track the changes made to a PsDict between successive runs, buffering
    the created / updated / deleted keys.
    """
    def __init__(self,
                 slot,
                 buffer_created=True,
                 buffer_updated=True,
                 buffer_deleted=True,
                 buffer_exposed=False,
                 buffer_masked=False):
        super().__init__(slot,
                         buffer_created,
                         buffer_updated,
                         buffer_deleted,
                         buffer_exposed,
                         buffer_masked)
        self._last_dict = None
        # Make sure the slot's data carries a change-tracking object.
        data = slot.data()
        if data.changes is None:
            data.changes = TableChanges()

    def reset(self, name=None):
        super().reset(name)
        self._last_dict = None

    def update(self, run_number, data, mid):
        # pylint: disable=unused-argument
        assert isinstance(data, PsDict)
        if data is None or (run_number != 0 and
                            run_number <= self._last_update):
            # Nothing new since the last processed run.
            return
        data.fix_indices()
        previous = self._last_dict
        if previous is None:
            # First run: every key counts as newly created.
            data.changes.add_created(data.ids)
        else:
            # Diff the current dict against the snapshot of the last run.
            data.changes.add_created(data.new_indices(previous))
            data.changes.add_updated(data.updated_indices(previous))
            data.changes.add_deleted(data.deleted_indices(previous))
        changes = data.compute_updates(self._last_update, run_number, mid)
        self._last_dict = copy.copy(data)
        self._last_update = run_number
        self._row_changes.combine(changes,
                                  self.created.buffer,
                                  self.updated.buffer,
                                  self.deleted.buffer)
# Register the manager so slots carrying a PsDict use DictChangeManager.
Slot.add_changemanager_type(PsDict, DictChangeManager)
| 33.65 | 74 | 0.594849 | 1,793 | 0.888063 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.052006 |
5b66e22e0d96cd42bf15f9bdcbe03461e565fd2f | 1,813 | py | Python | classifier/classes/data/loaders/Loader.py | canary-for-cognition/multimodal-dl-framework | 54ebd3c6dcdfc48ed619316321d9f0e7a5f0fc9c | [
"MIT"
] | 2 | 2021-08-31T08:58:30.000Z | 2021-09-02T14:32:30.000Z | classifier/classes/data/loaders/Loader.py | canary-for-cognition/multimodal-nn-framework | 7733376b05840e2b3dead438dd3981db9694b6ae | [
"MIT"
] | 5 | 2020-09-22T04:29:25.000Z | 2020-12-20T16:16:47.000Z | classifier/classes/data/loaders/Loader.py | canary-for-cognition/multimodal-nn-framework | 7733376b05840e2b3dead438dd3981db9694b6ae | [
"MIT"
] | 1 | 2021-02-09T18:40:55.000Z | 2021-02-09T18:40:55.000Z | import os
import torch
from classifier.classes.utils.Params import Params
class Loader:
def __init__(self, modality: str, for_submodule: bool = False):
self._modality = modality
self._modality_params = Params.load_modality_params(self._modality)
experiment_params = Params.load_experiment_params()
dataset_params = Params.load_dataset_params(experiment_params["dataset_name"])
self._path_to_modalities = dataset_params["paths"]
self._network_type = experiment_params["train"]["network_type"]
if for_submodule:
multimodal_network_params = Params.load_network_params(self._network_type)
self._network_type = multimodal_network_params["submodules"][self._modality]["architecture"]
path_to_modality = self._path_to_modalities[self._modality]
self._path_to_data = os.path.join(path_to_modality, self._modality_params["path_to_data"])
self._file_format = self._modality_params["file_format"]
def _get_path_to_item(self, path_to_input: str) -> str:
"""
Creates the path to the data item for the specified modality
:param path_to_input: the path to the data item related to the main modality
:return: the path to the eye-tracking sequence data item
"""
split_path = path_to_input.split(os.sep)
file_name = str(split_path[-1]).split(".")[0] + "." + self._file_format
label = str(split_path[-2])
return os.path.join(self._path_to_data, label, file_name)
def load(self, path_to_input: str) -> torch.Tensor:
"""
Loads a data item from the dataset
:param path_to_input: the path to the data item to be loaded (referred to the main modality)
:return: the loaded data item
"""
pass
| 40.288889 | 104 | 0.690017 | 1,734 | 0.956426 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.293436 |
5b67e852602727d6512e5eacbc68b00f61443e8e | 9,486 | py | Python | rel2/bluecat_app/bin/bluecat/entity.py | mheidir/BlueCatSG-SplunkApp-UnOfficial | bd914b8650d191e48c18acda5bdd70aeabb99207 | [
"Apache-2.0"
] | 1 | 2018-06-26T14:57:54.000Z | 2018-06-26T14:57:54.000Z | rel2/bluecat_app/bin/bluecat/entity.py | mheidir/BlueCatSG-SplunkApp-UnOfficial | bd914b8650d191e48c18acda5bdd70aeabb99207 | [
"Apache-2.0"
] | null | null | null | rel2/bluecat_app/bin/bluecat/entity.py | mheidir/BlueCatSG-SplunkApp-UnOfficial | bd914b8650d191e48c18acda5bdd70aeabb99207 | [
"Apache-2.0"
] | null | null | null | from suds import WebFault
from api_exception import api_exception
from util import *
from version import version
from wrappers.generic_setters import *
class entity(object):
    """Instantiate an entity. Entities are hashable and comparable with the = operator.

    :param api: API instance used by the entity to communicate with BAM.
    :param soap_entity: the SOAP (suds) entity returned by the BAM API.
    :param soap_client: the suds client instance.
    :param ver: optional BAM version string; when empty, the version is read from the API.
    """

    def __init__(self, api, soap_entity, soap_client, ver=''):
        self._api = api
        if not ver:
            self._version = api.get_version()
        else:
            self._version = version(ver)
        # BAM >= 8.1.0 expects '' instead of None as the "no value" parameter.
        if (self._version >= '8.1.0'):
            self._none_parameter = ''
        else:
            self._none_parameter = None
        self._soap_entity = soap_entity
        self._soap_client = soap_client
        self._properties = {}
        # Properties that must never be written back via update().
        self._immutable_properties = ['parentId', 'parentType']
        if 'properties' in self._soap_entity and self._soap_entity['properties'] is not None:
            self._properties = properties_to_map(self._soap_entity['properties'])

    # BAM entity type name constants.
    Entity = 'Entity'
    Configuration = 'Configuration'
    View = 'View'
    Zone = 'Zone'
    InternalRootZone = 'InternalRootZone'
    ZoneTemplate = 'ZoneTemplate'
    EnumZone = 'EnumZone'
    EnumNumber = 'EnumNumber'
    RPZone = 'RPZone'
    HostRecord = 'HostRecord'
    AliasRecord = 'AliasRecord'
    MXRecord = 'MXRecord'
    TXTRecord = 'TXTRecord'
    SRVRecord = 'SRVRecord'
    GenericRecord = 'GenericRecord'
    HINFORecord = 'HINFORecord'
    NAPTRRecord = 'NAPTRRecord'
    RecordWithLink = 'RecordWithLink'
    ExternalHostRecord = 'ExternalHostRecord'
    StartOfAuthority = 'StartOfAuthority'
    IP4Block = 'IP4Block'
    IP4Network = 'IP4Network'
    IP6Block = 'IP6Block'
    IP6Network = 'IP6Network'
    IP4NetworkTemplate = 'IP4NetworkTemplate'
    DHCP4Range = 'DHCP4Range'
    IP4Address = 'IP4Address'
    IP6Address = 'IP6Address'
    InterfaceID = 'InterfaceID'
    MACPool = 'MACPool'
    DenyMACPool = 'DenyMACPool'
    MACAddress = 'MACAddress'
    TagGroup = 'TagGroup'
    Tag = 'Tag'
    User = 'User'
    UserGroup = 'UserGroup'
    Server = 'Server'
    NetworkServerInterface = 'NetworkServerInterface'
    PublishedServerInterface = 'PublishedServerInterface'
    NetworkInterface = 'NetworkInterface'
    VirtualInterface = 'VirtualInterface'
    LDAP = 'LDAP'
    Kerberos = 'Kerberos'
    Radius = 'Radius'
    TFTPGroup = 'TFTPGroup'
    TFTPFolder = 'TFTPFolder'
    TFTPFile = 'TFTPFile'
    TFTPDeploymentRole = 'TFTPDeploymentRole'
    DeploymentRole = 'DNSDeploymentRole'
    DHCPDeploymentRole = 'DHCPDeploymentRole'
    DNSOption = 'DNSOption'
    DHCPV4ClientOption = 'DHCPV4ClientOption'
    DHCPServiceOption = 'DHCPServiceOption'
    DHCPV6ClientOption = 'DHCPV6ClientOption'
    DHCPV6ServiceOption = 'DHCPV6ServiceOption'
    VendorProfile = 'VendorProfile'
    VendorOptionDef = 'VendorOptionDef'
    VendorClientOption = 'VendorClientOption'
    CustomOptionDef = 'CustomOptionDef'
    DHCPMatchClass = 'DHCPMatchClass'
    DHCPSubClass = 'DHCPSubClass'
    Device = 'Device'
    DeviceType = 'DeviceType'
    DeviceSubtype = 'DeviceSubtype'
    DeploymentScheduler = 'DeploymentScheduler'
    IP4ReconciliationPolicy = 'IP4ReconciliationPolicy'
    DNSSECSigningPolicy = 'DNSSECSigningPolicy'
    IP4IPGroup = 'IP4IPGroup'
    ResponsePolicy = 'ResponsePolicy'
    KerberosRealm = 'KerberosRealm'
    DHCPRawOption = 'DHCPRawOption'
    DHCPV6RawOption = 'DHCPV6RawOption'
    DNSRawOption = 'DNSRawOption'
    DHCP6Range = 'DHCP6Range'
    ACL = 'ACL'
    TSIGKey = 'TSIGKey'

    def __hash__(self):
        return hash(self.get_id())

    def __eq__(self, other):
        # Two entities are equal when they refer to the same BAM object ID.
        return self.get_id() == other.get_id()

    def get_url(self):
        """Get the URL of the BAM server this entity belongs to."""
        return self._api.get_url()

    def get_id(self):
        """Get the BAM ID of an entity.
        """
        return self._soap_entity['id']

    def is_null(self):
        """Is this the null entity? (ID == 0).
        """
        return 'id' not in self._soap_entity or self._soap_entity['id'] == 0

    def get_name(self):
        """Get the BAM name of the entity, or None when it has no name.
        """
        if 'name' in self._soap_entity:
            return self._soap_entity['name']
        else:
            return None

    def get_type(self):
        """Get the BAM type of the entity.
        """
        return self._soap_entity['type']

    def get_properties(self):
        """Get the properties of the entity in the form of a dictionary containing one entry per property.
        """
        return self._properties

    def get_property(self, name):
        """Get a single named property for the entity or None if not defined.
        """
        if name in self._properties:
            return self._properties[name]
        else:
            return None

    def get_parent(self):
        """Get the parent entity or None if the entity is at the top of the hierarchy.
        """
        try:
            res = self._api.instantiate_entity(self._soap_client.service.getParent(self.get_id()), self._soap_client)
            return None if res.get_id() == 0 else res
        except WebFault as e:
            raise api_exception(e.message)

    def get_parent_of_type(self, type):
        """Walk up the entity hierarchy and return the first parent entity of the given type.

        :raises api_exception: if no parent of the given type exists, or after
            100 levels (safety bound against cyclic hierarchies).
        """
        parent = self
        count = 0
        while count < 100:
            # Bug fix: the counter was never incremented, which defeated the
            # 100-level safety bound.
            count += 1
            parent = parent.get_parent()
            # get_parent() returns None at the top of the hierarchy; treat it
            # the same as the null entity instead of crashing on None.is_null().
            if parent is None or parent.is_null():
                raise api_exception('No parent of type %s found.' % type)
            if parent.get_type() == type:
                try:
                    return self._api.instantiate_entity(self._soap_client.service.getEntityById(parent.get_id()),
                                                        self._soap_client)
                except WebFault as e:
                    raise api_exception(e.message)
        raise api_exception('API failure, no parent of type %s found.' % type)

    def get_children_of_type(self, type, max_results=500):
        """Get all the immediate children of an entity of the given type.

        :param max_results: maximum number of children to return.
        """
        try:
            res = []
            s = self._soap_client.service.getEntities(self.get_id(), type, 0, max_results)
            if not has_response(s):
                return res
            else:
                for dr in s.item:
                    res.append(self._api.instantiate_entity(dr, self._soap_client))
                return res
        except WebFault as e:
            raise api_exception(e.message)

    def get_linked_entities(self, type, max_results=500):
        """Get all the linked entities of a given type.

        :param max_results: maximum number of entities to return.
        """
        try:
            res = []
            s = self._soap_client.service.getLinkedEntities(self.get_id(), type, 0, max_results)
            if not has_response(s):
                return res
            else:
                for dr in s.item:
                    res.append(self._api.instantiate_entity(dr, self._soap_client))
                return res
        except WebFault as e:
            raise api_exception(e.message)

    def get_child_by_name(self, name, type):
        """Get a specific named immediate child entity of a given type, or None.
        """
        try:
            res = self._soap_client.service.getEntityByName(self.get_id(), name, type)
            if not has_response(res):
                return None
            else:
                return self._api.instantiate_entity(res, self._soap_client)
        except WebFault as e:
            raise api_exception(e.message)

    def set_property(self, name, value):
        """Set a property value. The change is not persisted until update() is called.
        """
        self._properties[name] = value

    def update(self):
        """Persist any changes to the entity to the BAM database.
        """
        # Serialize the mutable properties back into BAM's "k=v|" format.
        s = ''
        for k, v in self._properties.items():
            if k not in self._immutable_properties:
                s += k + '=' + v + '|'
        self._soap_entity['properties'] = s
        try:
            self._soap_client.service.update(self._soap_entity)
        except WebFault as e:
            raise api_exception(e.message)

    def delete(self):
        """Delete the entity from the BAM database.
        """
        try:
            delete_entity(self._soap_client, self.get_id(), self._version)
        except WebFault as e:
            raise api_exception(e.message)

    def dump(self):
        """Dump out details of the entity to stdout. Useful for debug.
        """
        # print() call form works under both Python 2 and 3 for a single arg.
        print(self._soap_entity)

    def get_deployment_roles(self, types=[]):
        """Get deployment roles for the entity.

        :param types: An optional list of deployment role types (documented in the deployment_role class). If the list is empty all types are returned.
        """
        try:
            res = []
            s = self._soap_client.service.getDeploymentRoles(self.get_id())
            if has_response(s):
                # Reuse the response already fetched above (the old code issued
                # a second identical SOAP call here).
                for dr in s.item:
                    # Bug fix: an empty *types* list now returns every role,
                    # matching the documented behavior.
                    if not types or dr['type'] in types:
                        res.append(self._api.instantiate_entity(dr, self._soap_client))
            return res
        except WebFault as e:
            raise api_exception(e.message)
| 34.747253 | 151 | 0.613641 | 9,330 | 0.983555 | 0 | 0 | 0 | 0 | 0 | 0 | 2,822 | 0.297491 |
5b695471ab29b797681a7c5ba85992682e02db15 | 137 | py | Python | users/admin.py | rossm6/accounts | 74633ce4038806222048d85ef9dfe97a957a6a71 | [
"MIT"
] | 11 | 2021-01-23T01:09:54.000Z | 2021-01-25T07:16:30.000Z | users/admin.py | rossm6/accounts | 74633ce4038806222048d85ef9dfe97a957a6a71 | [
"MIT"
] | 7 | 2021-04-06T18:19:10.000Z | 2021-09-22T19:45:03.000Z | users/admin.py | rossm6/accounts | 74633ce4038806222048d85ef9dfe97a957a6a71 | [
"MIT"
] | 3 | 2021-01-23T18:55:32.000Z | 2021-02-16T17:47:59.000Z | from django.contrib import admin
from users.models import Lock, UserSession
# Expose the Lock and UserSession models in the Django admin site.
admin.site.register(Lock)
admin.site.register(UserSession)
| 19.571429 | 42 | 0.824818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
5b6a869041b8f7099f1366b7e23ffabaa5619e62 | 1,083 | py | Python | selenium/load-html-from-string-instead-of-url/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | selenium/load-html-from-string-instead-of-url/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | selenium/load-html-from-string-instead-of-url/main.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z | #!/usr/bin/env python3
# date: 2019.11.24
import selenium.webdriver
# Start a Firefox session driven by Selenium.
driver = selenium.webdriver.Firefox()
# In-memory HTML document used instead of a remote page.
html_content = """
<div class=div1>
<ul>
<li>
<a href='path/to/div1stuff/1'>Generic string 1</a>
<a href='path/to/div1stuff/2'>Generic string 2</a>
<a href='path/to/div1stuff/3'>Generic string 3</a>
</li>
</ul>
</div>
<div class=div2>
<ul>
<li>
<a href='path/to/div2stuff/1'>Generic string 1</a>
<a href='path/to/div2stuff/2'>Generic string 2</a>
<a href='path/to/div2stuff/3'>Generic string 3</a>
</li>
</ul>
</div>
"""
# Load the HTML from the string by wrapping it in a data: URL.
driver.get("data:text/html;charset=utf-8," + html_content)
# All anchors inside the second div.
elements = driver.find_elements_by_css_selector("div.div2 a")
for x in elements:
    print(x.get_attribute('href'))
# Pick the anchor in div2 whose text contains 'Generic string 2' and click it.
item = driver.find_element_by_xpath("//div[@class='div2']//a[contains(text(),'Generic string 2')]")
print(item.get_attribute('href'))
item.click()
| 26.414634 | 99 | 0.54663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 780 | 0.720222 |
5b6ad3dccf84bb8ff4fe870c3a1a16f0ee7630a4 | 263 | py | Python | Python_codes/palindrome_string/palindrome.py | latedeveloper08/hacktober2021 | 3b2b4781668221b32dd96e2335157572b00dcf56 | [
"MIT"
] | null | null | null | Python_codes/palindrome_string/palindrome.py | latedeveloper08/hacktober2021 | 3b2b4781668221b32dd96e2335157572b00dcf56 | [
"MIT"
] | null | null | null | Python_codes/palindrome_string/palindrome.py | latedeveloper08/hacktober2021 | 3b2b4781668221b32dd96e2335157572b00dcf56 | [
"MIT"
] | null | null | null | string=input("Enter a string:")
length = len(string)
mid = length // 2
# rev walks backwards from the end: -1, -2, -3, ...
rev = -1
for a in range(mid):
    if string[a] == string[rev]:
        # Bug fix: rev was reset to -1 instead of decremented, so every
        # front character was compared against the last character only.
        rev -= 1
    else:
        # Bug fix: the success/failure messages were swapped — a mismatch
        # means the string is NOT a palindrome.
        print(string, "is not a palindrome")
        break
else:
    # Loop completed without a mismatch: all mirrored pairs matched.
    print(string, "is a palindrome")
| 17.533333 | 39 | 0.585551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 55 | 0.209125 |
5b6e0169ead672709cd2da73eb412706af67a106 | 3,841 | py | Python | PythonClient/Framework/ModCameraGimble.py | SweetShot/AirSim | d43269f9387fdac03298d14416ecf6af43b6fd12 | [
"MIT"
] | null | null | null | PythonClient/Framework/ModCameraGimble.py | SweetShot/AirSim | d43269f9387fdac03298d14416ecf6af43b6fd12 | [
"MIT"
] | null | null | null | PythonClient/Framework/ModCameraGimble.py | SweetShot/AirSim | d43269f9387fdac03298d14416ecf6af43b6fd12 | [
"MIT"
] | 2 | 2018-03-07T18:23:42.000Z | 2020-02-12T19:58:32.000Z | from ModBase import *
import setup_path
import airsim
import copy
class CameraOrientations:
    """Per-camera gimble state: orientation relative to the vehicle body plus
    gimble ownership/locking bookkeeping.

    Angles are given in degrees and stored in radians, using the same 3.14
    approximation of pi as the rest of this module.
    """

    def __init__(self, id, pitch = 0, roll = 0, yaw = 0): # wrt body
        """
        :param id: camera index
        :param pitch: pitch wrt body, degrees
        :param roll: roll wrt body, degrees
        :param yaw: yaw wrt body, degrees
        """
        self.id = id
        self.pitch = pitch * 3.14/180
        self.roll = roll * 3.14/180
        self.yaw = yaw * 3.14/180
        self.user = None       # current gimble owner, or None when free
        self.enabled = False   # whether gimble stabilization is active

    def set_gimble_params(self, user, pitch, roll, yaw): # In degrees
        """Claim the gimble for *user* and set its target angles (degrees).

        :raises ValueError: if another user already controls this gimble
        """
        # `is None` instead of `== None` (PEP 8); behavior unchanged.
        if self.user is None:
            self.user = user
            self.pitch = pitch * 3.14/180
            self.roll = roll * 3.14/180
            self.yaw = yaw * 3.14/180
        else:
            raise ValueError("{0} is controlling gimble on camera {1}".format(self.user, self.id))

    def enable_gimble(self, user):
        """Enable gimble stabilization for *user*.

        :raises ValueError: if no params were set first, or another user owns it
        """
        if self.user is None:
            raise ValueError("No camera {0} user, please set params first".format(self.id))
        if self.user != user:
            raise ValueError("Other user is controlling gimble on this camera, {0}".format(self.user))
        self.enabled = True

    def disable_gimble(self, user):
        """Disable gimble stabilization and release ownership.

        No-op when the gimble has no owner.
        :raises ValueError: if another user owns the gimble
        """
        if self.user is None:
            return
        if self.user != user:
            raise ValueError("Other user is controlling gimble on this camera, {0}".format(self.user))
        self.enabled = False
        self.user = None
class ModCameraGimble(ModBase):
    """Camera gimble module: every enabled camera's orientation is
    counter-rotated against the drone's pitch/roll (clamped to
    +/- gimble_max_angle) so the view stays level; yaw is left untouched."""

    def __init__(self, controller):
        super().__init__(controller)
        self.constant_module = self.get_persistent_module('constants')
        self.cameras = [0 for i in range(self.constant_module.no_of_cameras)]
        self.gimble_max_angle = 30 * 3.14 / 180  # default clamp: 30 deg, stored in radians
        # Read the initial orientation of every camera from the simulator.
        # NOTE(review): toEulerianAngle returns radians but CameraOrientations
        # expects degrees, so these values get scaled by 3.14/180 twice —
        # confirm whether that is intentional.
        for i in range(self.constant_module.no_of_cameras):
            camera_info = self.get_client().getCameraInfo(i)
            pitch, roll, yaw = AirSimClientBase.toEulerianAngle(camera_info.pose.orientation)
            self.cameras[i] = CameraOrientations(i, pitch, roll, yaw)
            print("Camera {0} ori ({1:.2f} {2:.2f} {3:.2f})".format(
                i, self.cameras[i].pitch, self.cameras[i].roll, self.cameras[i].yaw,
            ))

    @staticmethod
    def get_name():
        # @staticmethod added: the original bare `def get_name():` (no self)
        # could only be called on the class object; existing
        # ModCameraGimble.get_name() call sites are unaffected.
        return 'camera_gimble'

    def get_camera(self, id):
        """Return the CameraOrientations record for camera *id*."""
        return self.cameras[id]

    def set_gimble_max_angle(self, angle): # in degrees
        """Set the maximum compensation angle; *angle* is given in degrees."""
        self.gimble_max_angle = angle * 3.14 / 180

    def start(self):
        super().start()
        self.mystate_module = self.get_persistent_module('mystate')

    def _cap(self, angle):
        """Clamp *angle* (radians) to [-gimble_max_angle, +gimble_max_angle]."""
        return max(min(angle, self.gimble_max_angle), -self.gimble_max_angle)

    def update(self):
        """Apply the gimble compensation to every enabled camera."""
        drone_angles = AirSimClientBase.toEulerianAngle(self.mystate_module.get_state().kinematics_true.orientation)
        for cam in self.cameras:
            if cam.enabled:
                self.log("Setting Camera {0} orientation to ({1:.2f} {2:.2f} {3:.2f})".format(
                    cam.id, cam.pitch - self._cap(drone_angles[0]),
                    cam.roll - self._cap(drone_angles[1]), cam.yaw
                ))
                self.get_client().setCameraOrientation(cam.id,
                        AirSimClientBase.toQuaternion(cam.pitch - self._cap(drone_angles[0]),
                                                      cam.roll - self._cap(drone_angles[1]), cam.yaw))

    def stop(self):
        super().stop()
| 40.431579 | 116 | 0.601146 | 3,770 | 0.981515 | 0 | 0 | 0 | 0 | 0 | 0 | 870 | 0.226504 |
5b6fbfdce0aeea51fda85018f63eb008c4350c82 | 174 | py | Python | gluoncv/data/video_custom/__init__.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 5,447 | 2018-04-25T18:02:51.000Z | 2022-03-31T00:59:49.000Z | gluoncv/data/video_custom/__init__.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,566 | 2018-04-25T21:14:04.000Z | 2022-03-31T06:42:42.000Z | gluoncv/data/video_custom/__init__.py | Kh4L/gluon-cv | 849411ed56632cd854850b07142087d599f97dcb | [
"Apache-2.0"
] | 1,345 | 2018-04-25T18:44:13.000Z | 2022-03-30T19:32:53.000Z | # pylint: disable=wildcard-import
"""
Customized data loader for video classification related tasks.
"""
from __future__ import absolute_import
from .classification import *
| 24.857143 | 62 | 0.804598 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 103 | 0.591954 |
5b6fc9e9dc0aa155879b2501fe4a3f2cf129de50 | 809 | py | Python | CAV Test and Evaluation/Scaled-vehicle-in-the-loop/catkin_ws/src/tongjirc/script/talker.py | tongjirc/Intelligent-Vehicle-and-Road | 1c6b35612007bb0a20aafd39e5ec9ab9d67ec2e3 | [
"MIT"
] | 3 | 2019-07-22T01:19:25.000Z | 2020-11-16T02:42:07.000Z | CAV Test and Evaluation/Scaled-vehicle-in-the-loop/catkin_ws/src/tongjirc/script/talker.py | tongjirc/Intelligent-Vehicle-and-Road | 1c6b35612007bb0a20aafd39e5ec9ab9d67ec2e3 | [
"MIT"
] | null | null | null | CAV Test and Evaluation/Scaled-vehicle-in-the-loop/catkin_ws/src/tongjirc/script/talker.py | tongjirc/Intelligent-Vehicle-and-Road | 1c6b35612007bb0a20aafd39e5ec9ab9d67ec2e3 | [
"MIT"
] | 1 | 2020-11-16T02:42:08.000Z | 2020-11-16T02:42:08.000Z | #!/usr/bin/env python2
#created by Alvin zixuan tongjirc
#Oct. 1th 2018
import rospy
import time
from std_msgs.msg import String,Duration
from trajectory_msgs.msg import JointTrajectoryPoint
# Single message instance reused for every publish; fields are refreshed each loop.
jp=JointTrajectoryPoint()
def talker():
    """Publish a dummy JointTrajectoryPoint on the 'status' topic at 10 Hz
    until ROS shuts down."""
    status_pub = rospy.Publisher('status', JointTrajectoryPoint, queue_size=10)
    rospy.init_node('vehicleclient', anonymous=True)
    loop_rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        # Refresh the shared message with fixed demo values plus a timestamp.
        jp.positions = [1, 2, 3, 4]
        jp.velocities = [5, 6, 7, 8]
        jp.accelerations = [9, 10]
        jp.effort = [1]
        jp.time_from_start = rospy.Duration.from_sec(time.time())
        status_pub.publish(jp)
        loop_rate.sleep()
if __name__ == '__main__':
    try:
        talker()
    # ROSInterruptException is raised by Rate.sleep() on shutdown (e.g.
    # Ctrl-C); exiting quietly is the conventional response.
    except rospy.ROSInterruptException:
        pass
| 26.096774 | 72 | 0.660074 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 137 | 0.169345 |
5b728343141af718346ac4978505ec35a2d52270 | 470 | py | Python | problems/greedy/Solution870.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/greedy/Solution870.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | problems/greedy/Solution870.py | akalu/cs-problems-python | 9b1bd8e3932be62135a38a77f955ded9a766b654 | [
"MIT"
] | null | null | null | """ Given two arrays A and B of equal size, the advantage of A with respect to B
is the number of indices i for which A[i] > B[i].
Return any permutation of A that maximizes its advantage with respect to B.
Example 1:
Input: A = [2,7,11,15], B = [1,10,4,11] Output: [2,11,7,15]
IDEA:
Only relative order does matter, so one can apply greedy approach here
Sort both arrays, and find the
"""
class Solution870:
    """Placeholder for the 'advantage shuffle' (Project Euler-style) solution
    described in the module docstring above; not implemented yet."""
    pass
| 23.5 | 82 | 0.63617 | 27 | 0.057447 | 0 | 0 | 0 | 0 | 0 | 0 | 440 | 0.93617 |
5b7391d6b2a463ec5ffec394ba3d99bf008d1599 | 1,182 | py | Python | src/authentication/migrations/0003_user_additional_fiels_nullable.py | Alirezaja1384/MajazAmooz | 9200e46bed33aeb60d578a5c4c02013a8032cf08 | [
"MIT"
] | 3 | 2021-04-01T19:42:53.000Z | 2022-03-01T09:50:17.000Z | src/authentication/migrations/0003_user_additional_fiels_nullable.py | Alirezaja1384/MajazAmooz | 9200e46bed33aeb60d578a5c4c02013a8032cf08 | [
"MIT"
] | null | null | null | src/authentication/migrations/0003_user_additional_fiels_nullable.py | Alirezaja1384/MajazAmooz | 9200e46bed33aeb60d578a5c4c02013a8032cf08 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.7 on 2021-03-27 10:53
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: relaxes the user's extra fields
    # (avatar, coins, diamonds, scores) to accept blank/null values.
    # The Persian verbose_name strings are the generated UI labels; do not
    # edit this file by hand.

    dependencies = [
        ("authentication", "0002_auto_20210326_1814"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="avatar",
            field=models.ImageField(
                blank=True,
                upload_to="images/avatars",
                verbose_name="تصویر پروفایل",
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="coins",
            field=models.PositiveIntegerField(
                blank=True, null=True, verbose_name="سکه ها"
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="diamonds",
            field=models.PositiveIntegerField(
                blank=True, null=True, verbose_name="الماس ها"
            ),
        ),
        migrations.AlterField(
            model_name="user",
            name="scores",
            field=models.PositiveIntegerField(
                blank=True, null=True, verbose_name="امتیاز"
            ),
        ),
    ]
| 26.863636 | 62 | 0.51692 | 1,119 | 0.923267 | 0 | 0 | 0 | 0 | 0 | 0 | 232 | 0.191419 |
5b73e195d32533a2f980dd3d40f7b2ac22ca9c43 | 757 | py | Python | src/hyperloop/Python/mission/tests/test_lat_long.py | jcchin/Hyperloop_v2 | 73861d2207af8738425c1d484909ed0433b9653f | [
"Apache-2.0"
] | 1 | 2021-04-29T00:23:03.000Z | 2021-04-29T00:23:03.000Z | src/hyperloop/Python/mission/tests/test_lat_long.py | jcchin/Hyperloop_v2 | 73861d2207af8738425c1d484909ed0433b9653f | [
"Apache-2.0"
] | 1 | 2018-10-10T04:06:34.000Z | 2018-10-10T04:06:34.000Z | src/hyperloop/Python/mission/tests/test_lat_long.py | jcchin/Hyperloop_v2 | 73861d2207af8738425c1d484909ed0433b9653f | [
"Apache-2.0"
] | 11 | 2016-01-19T20:26:35.000Z | 2021-02-13T11:16:20.000Z | import pytest
from hyperloop.Python.mission import lat_long
import numpy as np
from openmdao.api import Group, Problem
def create_problem(component):
    """Wrap *component* in a fresh openmdao Problem, registered as 'comp'."""
    problem = Problem(Group())
    problem.root.add('comp', component)
    return problem
class TestMissionDrag(object):
    """Checks the LatLong component's output against NPSS reference values."""
    def test_case1_vs_npss(self):
        """An offset of (100, 100) from (35N, 121W) should land near (35.898, -119.891).

        NOTE(review): reference values come from an NPSS run; R_E = 6378
        suggests kilometres for x/y and the Earth radius — confirm units.
        """
        component = lat_long.LatLong()
        prob = create_problem(component)
        prob.setup()
        prob['comp.x'] = 100.0
        prob['comp.y'] = 100.0
        prob['comp.lat_origin'] = 35.0
        prob['comp.long_origin'] = -121.0
        prob['comp.R_E'] = 6378.0
        prob.run()
        assert np.isclose(prob['comp.lat'], 35.898335, rtol = 0.01)
        assert np.isclose(prob['comp.long'], -119.891025, rtol = 0.01)
5b74a7016dc6843c0a1ceebedf0fe8627754ec1f | 451 | py | Python | answers/ex10.py | metmirr/project-euler | 2432e6fc392d5a3dfc87b331ad2246f8e84c1f12 | [
"MIT"
] | 1 | 2020-01-09T14:57:28.000Z | 2020-01-09T14:57:28.000Z | answers/ex10.py | metmirr/project-euler | 2432e6fc392d5a3dfc87b331ad2246f8e84c1f12 | [
"MIT"
] | null | null | null | answers/ex10.py | metmirr/project-euler | 2432e6fc392d5a3dfc87b331ad2246f8e84c1f12 | [
"MIT"
] | null | null | null | """
Problem 10:
The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.Find the sum of
all the primes below two million.
"""
def sum_primes_below(limit):
    """Return the sum of all primes strictly below *limit*.

    Proper Sieve of Eratosthenes: only prime candidates up to sqrt(limit)
    are expanded, and marking starts at candidate**2. The original walked
    every stride (composites included) from 2*stride, and shadowed the
    builtin ``sum`` with an accumulator variable.

    :param limit: exclusive upper bound (Project Euler 10 uses 2000000)
    :return: the sum of all primes < limit (0 when limit < 3)
    """
    if limit < 2:
        return 0
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for candidate in range(2, int(limit ** 0.5) + 1):
        if is_prime[candidate]:
            # Smaller multiples were already marked by smaller primes.
            for multiple in range(candidate * candidate, limit, candidate):
                is_prime[multiple] = False
    return sum(n for n, prime in enumerate(is_prime) if prime)


if __name__ == '__main__':
    # Problem 10: sum of all primes below two million.
    print('answer:', sum_primes_below(2000000))
| 17.346154 | 72 | 0.576497 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 143 | 0.317073 |
5b74ca87ccbb7137e1d1ff7086010eae374775ba | 1,646 | py | Python | src/oci_cli/cli_clients.py | honzajavorek/oci-cli | 6ea058afba323c6b3b70e98212ffaebb0d31985e | [
"Apache-2.0"
] | null | null | null | src/oci_cli/cli_clients.py | honzajavorek/oci-cli | 6ea058afba323c6b3b70e98212ffaebb0d31985e | [
"Apache-2.0"
] | null | null | null | src/oci_cli/cli_clients.py | honzajavorek/oci-cli | 6ea058afba323c6b3b70e98212ffaebb0d31985e | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
import os
import pkgutil
from os.path import abspath
from inspect import getsourcefile
# Both maps are populated as a side effect of importing each service's
# generated "client_mappings" module in the loop below.
CLIENT_MAP = {}
MODULE_TO_TYPE_MAPPINGS = {}
ALL_SERVICES_DIR = "services"
# Absolute path of this file; the lambda gives getsourcefile a concrete object.
this_file_path = abspath(getsourcefile(lambda: 0))
# Installed-package layout (site-/dist-packages) vs. source-checkout layout.
if "site-packages" in this_file_path or "dist-packages" in this_file_path:
    python_cli_root_dir = this_file_path[0:this_file_path.index("oci_cli")]
else:
    python_cli_root_dir = this_file_path[0:this_file_path.index("/src/oci_cli")]
services_dir = os.path.join(python_cli_root_dir, ALL_SERVICES_DIR)
# Import client mappings from platformization directories.
# This imports the generated client_mappings which populates CLIENT_MAP and MODULE_TO_TYPE_MAPPINGS.
for importer1, modname1, ispkg1 in pkgutil.iter_modules(path=[services_dir]):
    for importer, modname, ispkg in pkgutil.iter_modules(path=[services_dir + '/' + modname1 + '/src']):
        if ispkg and modname.startswith("oci_cli_"):
            oci_cli_module_name = modname.split(".")[0]
            service_name = oci_cli_module_name[8:]
            oci_cli_module = __import__(ALL_SERVICES_DIR + '.' + modname1 + '.src.' + oci_cli_module_name)
            # NOTE(review): 'services_dir' is reassigned here, so later
            # iterations of the OUTER loop use the package path instead of
            # the original services directory — confirm this is intended.
            services_dir = oci_cli_module.__path__[0]
            service_dir = os.path.join(services_dir, modname1, 'src', oci_cli_module_name)
            generated_module = "client_mappings"
            if os.path.isfile(os.path.join(service_dir, 'generated', generated_module + ".py")):
                __import__(ALL_SERVICES_DIR + '.' + modname1 + '.src.' + oci_cli_module_name + ".generated." + generated_module)
5b74fbbc3bd037c8121ea683c08bcace2932b036 | 26,767 | py | Python | testing/unittest_analysis.py | dsg-bielefeld/mumodo | 0f220ada888f5e464344f4150be6d8cd454b72a1 | [
"MIT"
] | 1 | 2020-04-27T06:47:12.000Z | 2020-04-27T06:47:12.000Z | testing/unittest_analysis.py | clp-research/mumodo | 0f220ada888f5e464344f4150be6d8cd454b72a1 | [
"MIT"
] | 1 | 2015-04-26T22:52:27.000Z | 2017-06-07T13:06:32.000Z | testing/unittest_analysis.py | dsg-bielefeld/mumodo | 0f220ada888f5e464344f4150be6d8cd454b72a1 | [
"MIT"
] | 1 | 2015-07-14T14:51:36.000Z | 2015-07-14T14:51:36.000Z | import unittest
import pandas as pd
from mumodo.mumodoIO import open_intervalframe_from_textgrid, \
open_streamframe_from_xiofile
from mumodo.analysis import intervalframe_overlaps, intervalframe_union, \
invert_intervalframe, \
create_intervalframe_from_streamframe, \
slice_streamframe_on_intervals, \
slice_intervalframe_by_time, \
convert_times_of_tier, convert_times_of_tiers, \
shift_tier, shift_tiers, get_tier_type, \
get_tier_boundaries, join_intervals_by_label, \
join_intervals_by_time, \
create_streamframe_from_intervalframe, \
slice_pointframe_by_time
class AnalysisTest(unittest.TestCase):
def setUp(self):
self.dict1 = open_intervalframe_from_textgrid('data/r1-20120704-cam1-'
'head-sk'
'.TextGrid', 'utf-8')
self.dict2 = open_intervalframe_from_textgrid('data/r1-20120704-cam1-'
'head-zm.TextGrid',
'utf-8')
self.dict3 = open_intervalframe_from_textgrid('data/mytextgrid'
'.TextGrid', 'utf-8')
self.dict4 = dict()
self.dict4['speaker A'] = self.dict3['speaker A'][0:4].copy(deep=True)
self.dict4['speaker B'] = self.dict3['speaker B'][0:4].copy(deep=True)
shift_tiers(self.dict4, 1)
self.tier1 = self.dict1['HeadSK']
self.tier2 = self.dict2['HeadZM']
self.stream = open_streamframe_from_xiofile('data/fseeksmaller.xio.gz',
"lab-labtop/irioKinect 2",
window_size=5,
with_fields=None,
without_fields=None,
discard_duplicates=True,
start_time=0,
end_time=500,
timestamp_offset=None)
self.ifr = create_intervalframe_from_streamframe(self.stream,
'soundAngle',
lambda x: True \
if x <- 0.7323895 \
else False,
10)
self.iframe = open_intervalframe_from_textgrid('data/mytextgrid.Text'
'Grid', 'utf-8')
self.speaker_a = self.iframe['speaker A']
self.speaker_b = self.iframe['speaker B']
self.overlap_ab_shouldbe = [{'end_time': 3.47134775993, 'start_time':
3.4540353413, 'text': u'exactly and at '
'the very end of the corridor is just '
'the bathroom/I see yes'},
{'end_time': 3.8781895978200001, 'start_'
'time': 3.7223778301200001,
'text': u'exactly and at the very end of '
'the corridor is just the bathroom/yes'},
{'end_time': 7.3163672878900003,
'start_time': 6.9095254500000003,
'text': u'that you quasi such have so '
'zack zack zack/yes'},
{'end_time': 7.5933659860300002,
'start_time': 7.4029293810599999,
'text': u'that you quasi such have so '
'zack zack zack/yes'},
{'end_time': 11.168380433799999,
'start_time': 10.8740693171,
'text': u'know you ?/yes I think I'},
{'end_time': 11.644471946299999,
'start_time': 11.2895673643,
'text': u'if this the corridor is here a '
'room there a room there a room '
'there and above/yes I think I'},
{'end_time': 14.892213031400001,
'start_time': 13.1073713208,
'text': u'if this the corridor is here a '
'room there a room there a room there '
'and above/there a room there there and '
'then there yes'},
{'end_time': 15.558741148799999,
'start_time': 15.376960753100001,
'text': u'that is perfect/yes'},
{'end_time': 17.679512431399999,
'start_time': 17.4544509891,
'text': u'yes/sure'},
{'end_time': 27.8435647606,
'start_time': 27.5492536438,
'text': u'twentyfive/so I would already '
'gladly a large room have'}]
self.right_overlap_ab = pd.DataFrame(self.overlap_ab_shouldbe,
columns=['start_time', 'end_time',
'text'])
self.union_dict = [{'end_time': 2.5191647350899999, 'start_time': 0.0,
'text': 'union'},
{'end_time': 6.1304666115000002, 'start_time': \
2.80481964254,
'text': 'union'},
{'end_time': 9.5669817102499994,
'start_time': 6.7796823102500001,
'text': 'union'},
{'end_time': 15.0393685898, 'start_time': \
10.7528823866, 'text': 'union'},
{'end_time': 16.1646758009,
'start_time': 15.281742450599999,
'text': 'union'},
{'end_time': 17.3159516401,
'start_time': 17.064921569900001,
'text': 'union'},
{'end_time': 17.714137268599998,
'start_time': 17.4457947798,
'text': 'union'},
{'end_time': 19.393441876099999,
'start_time': 17.826667989699999,
'text': 'union'}]
self.union_mustbe = pd.DataFrame(self.union_dict, columns=['start_'
'time', 'end_time', 'text'])
self.invert = [{'start_time': 0., 'end_time': 547.016,
'text': 'nod'},
{'start_time': 547.464, 'end_time': 549.507,
'text': 'nod/turn-aw'},
{'start_time': 549.988, 'end_time': 556.808,
'text': 'turn-aw/nod-2'},
{'start_time': 557.404, 'end_time': 561.428,
'text': 'nod-2/nod-3'},
{'start_time': 563.345, 'end_time': 600.000,
'text': 'nod-3'}]
self.inversion = pd.DataFrame(self.invert, columns=['start_'
'time', 'end_time', 'text'])
self.tier1_inverted = [{'end_time': 547.01599999999996,
'start_time': 0.0, 'text': u'nod'},
{'end_time': 549.50699999999995,
'start_time': 547.46400000000006,
'text': u'nod/turn-aw'},
{'end_time': 556.80799999999999,
'start_time': 549.98800000000006,
'text': u'turn-aw/nod-2'},
{'end_time': 561.428, 'start_time': 557.404,
'text': u'nod-2/nod-3'},
{'end_time': 567.51099999999997,
'start_time': 563.34500000000003,
'text': u'nod-3/slide-right'},
{'end_time': 578.17100000000005,
'start_time': 568.02300000000002,
'text': u'slide-right/nod-4'},
{'end_time': 586.03599999999994,
'start_time': 579.53499999999997,
'text': u'nod-4/nod-3'},
{'end_time': 604.73699999999997,
'start_time': 587.10699999999997,
'text': u'nod-3/nod-4-turn-aw-tw'},
{'end_time': 609.11800000000005,
'start_time': 607.35699999999997,
'text': u'nod-4-turn-aw-tw/nod-4'},
{'end_time': 617.30899999999997,
'start_time': 611.16600000000005,
'text': u'nod-4/nod-5'}]
self.invert_tier1 = pd.DataFrame(self.tier1_inverted,
columns=['start_time', 'end_time', 'text'])
self.empty = pd.DataFrame([])
self.withna = [{'start_time': 3, 'end_time': 6, 'text': 'hi'},
{'start_time': 7, 'end_time': 8.5},
{'start_time': 9, 'end_time': 13, 'text': 'bye'}]
self.emptyint = pd.DataFrame(self.withna, columns=['start_time',
'end_time', 'text'])
self.emptyshouldbe = [{'end_time': 3.0, 'start_time': 0, 'text': 'hi'},
{'end_time': 7.0, 'start_time': 6,
'text': 'hi/empty'},
{'end_time': 9.0, 'start_time': 8.5,
'text': 'empty/bye'}]
self.emptyshouldbepd = pd.DataFrame(self.emptyshouldbe, columns= \
['start_time', 'end_time', 'text'])
self.no_overlap1 = [{'start_time': 3, 'end_time': 5, 'text': 'eins'},
{'start_time': 9, 'end_time': 15, 'text': 'zwei'},
{'start_time': 19, 'end_time': 21, 'text': 'drei'}]
self.no_overlap2 = [{'start_time': 0, 'end_time': 2.5, 'text': 'vier'},
{'start_time': 6, 'end_time': 7, 'text': 'fuenf'},
{'start_time': 23, 'end_time': 27, 'text': 'sechs'}]
self.pd_no_overlap1 = pd.DataFrame(self.no_overlap1,
columns=['start_time', 'end_time', 'text'])
self.pd_no_overlap2 = pd.DataFrame(self.no_overlap2,
columns=['start_time', 'end_time', 'text'])
self.convert = [{'start_time': 4, 'end_time': 5, 'text': 'eins'},
{'start_time': 9, 'end_time': 15, 'text': 'zwei'},
{'start_time': 19, 'end_time': 21, 'text': 'drei'}]
self.convert_pd = pd.DataFrame(self.convert,
columns=['start_time', 'end_time', 'text'])
convert_times_of_tier(self.convert_pd, lambda y: int(1000 * y))
self.converted = [{'start_time': 4000, 'end_time': 5000,
'text': 'eins'}, {'start_time': 9000,
'end_time': 15000, 'text': 'zwei'},
{'start_time': 19000, 'end_time': 21000,
'text': 'drei'}]
self.converted_pd = pd.DataFrame(self.converted,
columns=['start_time', 'end_time', 'text'])
self.tiers_shifted_a = [{'end_time': 4.47134775993,
'start_time': 3.80481964254,
'text': u'I see yes'},
{'end_time':4.8781895978200001,
'start_time': 4.7223778301200001,
'text': u'yes'},
{'end_time': 8.3163672878900003,
'start_time': 7.9095254500000003,
'text': u'yes'},
{'end_time': 8.5933659860300002,
'start_time': 8.4029293810599999,
'text': u'yes'}]
self.tiers_shifted_pd = pd.DataFrame(self.tiers_shifted_a,
columns=['start_time', 'end_time', 'text'])
self.shift_dict = [{'start_time': 3, 'end_time': 5, 'text': 'eins'},
{'start_time': 9, 'end_time': 15, 'text': 'zwei'},
{'start_time': 19, 'end_time': 21, 'text': 'drei'}]
self.shift = pd.DataFrame(self.shift_dict,
columns=['start_time', 'end_time', 'text'])
shift_tier(self.shift, 1)
self.shifted_dict = [{'start_time': 4, 'end_time': 6, 'text': 'eins'},
{'start_time': 10, 'end_time': 16, 'text': 'zwei'},
{'start_time': 20, 'end_time': 22, 'text': 'drei'}]
self.shifted = pd.DataFrame(self.shifted_dict,
columns=['start_time', 'end_time', 'text'])
for col in self.dict4['speaker A'].columns:
self.dict4['speaker A'].loc[:, col] = \
self.dict4['speaker A'][col].map(lambda x: str(x))
for col in self.tiers_shifted_pd.columns:
self.tiers_shifted_pd.loc[:, col] = \
self.tiers_shifted_pd[col].map(lambda x: str(x))
self.withPoint = open_intervalframe_from_textgrid('data/r1_12_15with'
'Point.TextGrid',
'utf-8')
self.points = self.withPoint['P'][0:4].copy(deep=True)
shift_tier(self.points, 10)
for col in self.points.columns:
self.points.loc[:, col] = self.points[col].map(lambda x: str(x))
self.points_shifted = [{'mark': 'A', 'time': 12.804819642542952},
{'mark': 'B', 'time': 13.454035341299345},
{'mark': 'A', 'time': 13.722377830118717},
{'mark': 'B', 'time': 16.779682310252952}]
self.points_shifted_pd = pd.DataFrame(self.points_shifted,
columns=['time', 'mark'])
for col in self.points_shifted_pd.columns:
self.points_shifted_pd.loc[:, col] = \
self.points_shifted_pd[col].map(lambda x: str(x))
self.labeljoin = open_intervalframe_from_textgrid('data/joinlabels'
'.TextGrid', 'utf-8')
self.streamdict1 = [{'value': u'nod_start'},
{'value': u'nod_end'},
{'value': u'turn-aw_start'},
{'value': u'turn-aw_end'},
{'value': u'nod-2_start'},
{'value': u'nod-2_end'},
{'value': u'nod-3_start'},
{'value': u'nod-3'},
{'value': u'nod-3_end'}]
self.stream1 = pd.DataFrame(self.streamdict1, columns=['value'])
self.stream1.index = [547.016, 547.464, 549.507, 549.988, 556.808,
557.404, 561.428, 562.428, 563.345]
self.inv1 = [{'end_time': '547.016', 'start_time': '300.0',
'text': u'nod'},
{'end_time': '549.507', 'start_time': '547.464',
'text': u'nod/turn-aw'},
{'end_time': '556.808', 'start_time': '549.988',
'text': u'turn-aw/nod-2'},
{'end_time': '561.428', 'start_time': '557.404',
'text': u'nod-2/nod-3'},
{'end_time': '700.0', 'start_time': '563.345',
'text': u'nod-3'}]
self.inverted1 = pd.DataFrame(self.inv1,
columns=['start_time', 'end_time', 'text'])
self.inv1_str = invert_intervalframe(self.tier1[0:4], 300, 700)
for col in self.inv1_str:
self.inv1_str.loc[:, col] = \
self.inv1_str[col].map(lambda x: str(x))
self.inv_default_conc = [{'end_time': '549.507',
'start_time': '547.464', 'text': u'nod'},
{'end_time': '556.808',
'start_time': '549.988', 'text': u'turn-aw+nod-2'},
{'end_time': '561.428',
'start_time': '557.404', 'text': u'nod-2'}]
self.inv_def_pd = pd.DataFrame(self.inv_default_conc,
columns=['start_time', 'end_time', 'text'])
self.inv_def_str = invert_intervalframe(self.tier1[0:4],
concat_delimiter='+')
for col in self.inv_def_str:
self.inv_def_str.loc[:, col] = \
self.inv_def_str[col].map(lambda x: str(x))
    def test_overlaps(self):
        """Non-overlapping frames yield an empty result; overlap bounds and
        'a/b' concatenated labels (or 'overlap' when labels are disabled)
        match values taken from the TextGrid fixtures."""
        self.failUnlessEqual((intervalframe_overlaps(self.pd_no_overlap1,
                                                     self.pd_no_overlap2) == \
                              pd.DataFrame(columns=['start_time', 'end_time',
                                                    'text'])).all().all(), True)
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2).ix[0]['start_time'],
                             547.01599999999996)
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2).ix[0]['end_time'],
                             547.090)
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2).ix[3]['start_time'],
                             561.480)
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2).ix[3]['end_time'],
                             563.345)
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2).ix[0]['text'],
                             u'nod/turn-1-tw')
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2).ix[1]['text'],
                             u'turn-aw/turn-1-aw')
        # Third positional arg False disables label concatenation.
        self.failUnlessEqual(intervalframe_overlaps(self.tier1,
                                                    self.tier2, False).ix[1]['text'],
                             u'overlap')
        self.failUnlessEqual((intervalframe_overlaps(self.speaker_a,
                                                     self.speaker_b).ix[0:9] == \
                              self.right_overlap_ab).all().all(), True)
    def test_union(self):
        """Union of two tiers matches the expected 'union' frame and the
        boundary timestamps taken from the TextGrid fixtures."""
        self.failUnlessEqual((intervalframe_union(self.speaker_a,
                                                  self.speaker_b).iloc[:, 0:2][0:8] == \
                              self.union_mustbe.iloc[:, 0:2]).all().all(),
                             True)
        self.failUnlessEqual(intervalframe_union(self.tier1[0:5],
                                                 self.tier2[0:5]).ix[0]['end_time'],
                             547.464)
        self.failUnlessEqual(intervalframe_union(self.tier1[0:5],
                                                 self.tier2[0:5]).ix[4]['start_time'],
                             561.428)
    def test_slicing(self):
        """Stream/interval/point slicing returns the expected values and index
        bounds for fixed time windows of the fixture data."""
        self.failUnlessEqual(slice_streamframe_on_intervals(self.stream,
                             self.ifr)['framenumber'][1341393414961],
                             45776.0)
        self.failUnlessEqual(slice_intervalframe_by_time(self.tier1,
                                                         578.171,
                                                         698.440).index[0],
                             5)
        self.failUnlessEqual(slice_intervalframe_by_time(self.tier1,
                                                         578.171,
                                                         698.440).index[-1],
                             21)
        self.failUnlessEqual(slice_pointframe_by_time(self.withPoint['P'],
                                                      10, 100)['mark'].iloc[0],
                             'B')
    def test_inversion(self):
        """Inversion fills the gaps between intervals; an empty frame inverts
        to None. Several commented-out assertions are retained from earlier
        revisions of the fixture data."""
        self.failUnlessEqual((invert_intervalframe(self.tier1[0:4], 0, 600) == \
                              self.inversion).all().all(), True)
        #self.failUnlessEqual(invert_intervalframe(self.tier1, 0,
        #                     self.tier1['end_time'].iloc[-1])\
        #                     ['start_time'].ix[len(invert_intervalframe\
        #                     (self.tier1, 0, self.tier1['end_time'].\
        #                     iloc[-1]))-1], 1306.064)
        self.failUnlessEqual((self.inv1_str == self.inverted1).all().all(),
                             True)
        self.failUnlessEqual((self.inv_def_pd == self.inv_def_str).all().\
                             all(), True)
        #self.failUnlessEqual((invert_intervalframe(self.tier1[0:10], 0) == \
        #                      self.invert_tier1).all().all(), True)
        # self.failUnlessEqual(str(type(invert_intervalframe(self.empty))),
        #                      '<type \'NoneType\'>')
        self.failUnlessEqual(str(type(invert_intervalframe(pd.DataFrame()))),
                             '<type \'NoneType\'>')
        # self.failUnlessEqual((invert_intervalframe(self.emptyint, 0) == \
        #                      self.emptyshouldbepd).all().all(), True)
    def test_convertions_and_shifts(self):
        """Time conversion and tier shifting (applied in setUp) produce the
        pre-computed expected frames."""
        self.failUnlessEqual((self.convert_pd == \
                              self.converted_pd).all().all(), True)
        self.failUnlessEqual((self.shift == \
                              self.shifted).all().all(), True)
        self.failUnlessEqual((self.dict4['speaker A'] == \
                              self.tiers_shifted_pd).all().all(), True)
        self.failUnlessEqual((self.points == \
                              self.points_shifted_pd).all().all(), True)
    def test_stream_from_intervals(self):
        """Interval-to-stream conversion matches the expected stream frame;
        an empty input frame converts to None."""
        self.failUnlessEqual((create_streamframe_from_intervalframe(self.tier1\
                              [0:4], start_label_appendix='start',
                              end_label_appendix='end', fillstep=1)==\
                              self.stream1).all().all(), True)
        self.failUnlessEqual(str(create_streamframe_from_intervalframe(self.\
                             tier1[0:4], True, start_label_appendix=\
                             'start', end_label_appendix='end',
                             fillstep=1).index[2]), '2.491')
        self.failUnlessEqual(str(type(create_streamframe_from_intervalframe\
                             (pd.DataFrame()))), '<type \'NoneType\'>')
    def test_get_tier_type(self):
        """Tier type detection: interval vs. point columns, empty-but-typed
        frames count as 'interval', and None passes through as None."""
        self.failUnlessEqual(get_tier_type(self.withPoint['A']), 'interval')
        self.failUnlessEqual(get_tier_type(self.withPoint['P']), 'point')
        self.failUnlessEqual(get_tier_type(pd.DataFrame(None,
                                                        columns=['start_time',
                                                                 'end_time',
                                                                 'text'])),
                             'interval')
        self.failUnlessEqual(get_tier_type(None), None)
    def test_get_tier_boundaries(self):
        """Boundaries are (first, last) times for interval and point tiers;
        None and empty frames yield None; a single-point tier collapses to
        equal boundaries."""
        self.failUnlessEqual([int(x) for x in get_tier_boundaries(\
                              self.withPoint['A'])],
                             [2, 179])
        self.failUnlessEqual([int(x) for x in get_tier_boundaries(\
                              self.withPoint['P'])],
                             [2, 32])
        self.failUnlessEqual(get_tier_boundaries(None), None)
        self.failUnlessEqual(get_tier_boundaries(pd.DataFrame(None, columns=\
                                                              ['start_time',
                                                               'end_time',
                                                               'text'])),
                             None)
        self.failUnlessEqual([int(x) for x in get_tier_boundaries(\
                              self.withPoint['P'][:1])], [2, 2])
    def test_join_intervals_by_label_or_by_time(self):
        """Joining by label or by time (with optional gap threshold and
        concatenation delimiter) matches fixture values; empty input stays
        empty."""
        t = self.labeljoin['labels']
        self.failUnlessEqual(join_intervals_by_label(t)['end_time'].ix[2], 18)
        self.failUnlessEqual(join_intervals_by_label(t, 1.0)['end_time'].ix[5],
                             18)
        self.failUnlessEqual(len(join_intervals_by_label(self.empty)), 0)
        self.failUnlessEqual(join_intervals_by_time(t)['end_time'].ix[3], 10)
        self.failUnlessEqual(join_intervals_by_time(t, 1.0)['end_time'].ix[1],
                             6)
        self.failUnlessEqual(join_intervals_by_time(t, 2.0, '+')['text'].ix[0],
                             'a+b+b+b+c')
        self.failUnlessEqual(len(join_intervals_by_time(self.empty)), 0)
if __name__ == "__main__":
unittest.main()
| 51.179732 | 80 | 0.42403 | 25,842 | 0.965443 | 0 | 0 | 0 | 0 | 0 | 0 | 5,417 | 0.202376 |
5b75521d38171fc3f17cd63f930032aca3a11c04 | 799 | py | Python | main.py | neohanju/GoogleImageSearchDownload | 09344be0f356816ee16921f75b16114340e40b67 | [
"MIT"
] | null | null | null | main.py | neohanju/GoogleImageSearchDownload | 09344be0f356816ee16921f75b16114340e40b67 | [
"MIT"
] | null | null | null | main.py | neohanju/GoogleImageSearchDownload | 09344be0f356816ee16921f75b16114340e40b67 | [
"MIT"
] | null | null | null | # reference: http://icrawler.readthedocs.io/en/latest/usage.html
from icrawler.builtin import GoogleImageCrawler
import os
# Output root; one sub-folder per search keyword is created beneath it.
dataset_base_dir = 'D:/Workspace/Dataset/fake_image_detection/task_2'
keyword_lists = ['snapchat face swap', 'MSQRD']

for keyword in keyword_lists:
    folder_path = dataset_base_dir + '/' + keyword
    # Create the keyword's download folder on first use.
    # (Removed a dead 'else: pass' branch and an unused 'keyword_comma' local.)
    if not os.path.exists(folder_path):
        os.makedirs(folder_path)
        print(folder_path + ' is created!')

    # Two parser threads / four downloader threads per the icrawler usage docs.
    google_crawler = GoogleImageCrawler(parser_threads=2, downloader_threads=4,
                                        storage={'root_dir': folder_path})
    google_crawler.crawl(keyword=keyword, max_num=10000)
    print('Crawling ' + keyword + ' is done')
# ()()
# ('')HAANJU.YOO
| 27.551724 | 79 | 0.669587 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 217 | 0.271589 |
5b75d17a228cb623b421648bad8d7be7041786ab | 841 | py | Python | Chapter 3/Q19_Match_output.py | inshaal/CBSE_NCERT_SOLUTIONS | 0804c2b42e80ccf42ad7dc4d91998848529e216d | [
"Unlicense"
] | null | null | null | Chapter 3/Q19_Match_output.py | inshaal/CBSE_NCERT_SOLUTIONS | 0804c2b42e80ccf42ad7dc4d91998848529e216d | [
"Unlicense"
] | null | null | null | Chapter 3/Q19_Match_output.py | inshaal/CBSE_NCERT_SOLUTIONS | 0804c2b42e80ccf42ad7dc4d91998848529e216d | [
"Unlicense"
] | null | null | null | ''' Q19 Predict the output'''
# NOTE: Python 2 code (print statements) — an exercise in predicting output.
class Match:
    # Class docstring via implicit concatenation: '' + "Runs and Wickets" + ''.
    ''"Runs and Wickets"''
    # Class attributes, shadowed per-instance by __init__.
    runs=281
    wickets=5
    def __init__(self,runs,wickets):
        self.runs=runs
        self.wickets=wickets
        print "Runs scored are : ",runs
        print "Wickets taken are : ",wickets
# Module-level introspection of the class's special attributes.
print "Test.__do__ :",Match.__doc__
print "Test.__name__ : ",Match.__name__
print "Test.__module__ : ",Match.__module__
print "Test.__bases__ : ",Match.__bases__
print "Test.__dict__ : ",Match.__dict__
'''
SOLUTIONS : This is the output -
Runs scored are : 281
Wickets taken are : 5
Test.__do__ : Runs and Wickets
Test.__name__ : Match
Test.__module__ : __main__
Test.__bases__ : ()
Test.__dict__ : {'__module__': '__main__', 'runs': 281, '__doc__': 'Runs and Wickets', '__init__': <function __init__ at 0x0398BA70>, 'wickets': 5}
'''
| 26.28125 | 149 | 0.653983 | 244 | 0.290131 | 0 | 0 | 0 | 0 | 0 | 0 | 532 | 0.63258 |
5b77b55412440ca872621e6080c45d3d24321ec7 | 1,632 | py | Python | src/daipeproject/silver/01_some_notebook.py | DataSentics/daipe-bad-practices-1 | a23c1b42908f646763a3a1d0821600f8d0d78db2 | [
"MIT"
] | null | null | null | src/daipeproject/silver/01_some_notebook.py | DataSentics/daipe-bad-practices-1 | a23c1b42908f646763a3a1d0821600f8d0d78db2 | [
"MIT"
] | null | null | null | src/daipeproject/silver/01_some_notebook.py | DataSentics/daipe-bad-practices-1 | a23c1b42908f646763a3a1d0821600f8d0d78db2 | [
"MIT"
] | null | null | null | # Databricks notebook source
# MAGIC %run ../app/bootstrap
# COMMAND ----------
from pyspark.sql.dataframe import DataFrame
from datalakebundle.imports import transformation
# COMMAND ----------
datasets = [
{
"id": "123",
"name": "knihydobrovsky_cz",
"custom_attrs": {
105: "EXT_ID",
104: "ADFORM_ID",
2: "GA_ID",
},
},
{
"id": "4564",
"name": "knihomol_cz",
"custom_attrs": {
3: "EXT_ID",
2: "GA_ID",
},
},
]
# TODO 2: tahání configu a předávání přes globální proměnnou
@transformation("%datalake.base_base_path%")
def get_config(base_base_path: str):
    """Return the datalake base path.

    The '%datalake.base_base_path%' placeholder is presumably resolved from
    project config by the @transformation decorator — confirm in bootstrap.
    """
    return base_base_path
base_path = get_config.result  # module-level global consumed by the loop below (flagged by TODO 2)
# TODO 1: cyklus
# Deliberate anti-pattern demo: redefining decorated transformations per
# loop iteration; 'spark' is assumed to be the notebook-provided
# SparkSession global (not defined in this file) — confirm.
for dataset in datasets:
    # TODO 3: use logger instead of print
    print(dataset['name'])
    dataset_name = dataset['name']
    @transformation()
    def load_visits():
        # Reads the bronze-layer visits for the current dataset.
        return spark.read.format("delta").load(base_path + "/bronze/raw/visits/" + dataset_name)
    def load_custom_attrs():
        # NOTE(review): unlike load_visits, this one is NOT decorated — confirm intended.
        return spark.read.format("delta").load(base_path + "/bronze/raw/custom_attrs/" + dataset_name)
    # TODO 4: rule of thumb: one notebook should always produce/output one dataset
    @transformation(load_visits)
    def save_visits(df: DataFrame):
        df.write.format("delta").save(base_path + "/silver/parsed/visits/" + dataset_name, mode="append")
    @transformation(load_custom_attrs)
    def save_custom_attrs(df: DataFrame):
        df.write.format("delta").save(base_path + "/silver/parsed/custom_attrs/" + dataset_name, mode="append")
| 27.661017 | 111 | 0.628676 | 0 | 0 | 0 | 0 | 602 | 0.366626 | 0 | 0 | 615 | 0.374543 |
5b789ac50113811cea9f5671853ed49ad42da467 | 560 | py | Python | tests/pyspark_utils/test_convert_cerberus_schema_to_pyspark.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | [
"MIT"
] | null | null | null | tests/pyspark_utils/test_convert_cerberus_schema_to_pyspark.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | [
"MIT"
] | 252 | 2021-05-19T11:12:43.000Z | 2022-03-02T10:39:10.000Z | tests/pyspark_utils/test_convert_cerberus_schema_to_pyspark.py | ONS-SST/cis_households | e475df5929e6763a46cd05aff1f7e960ccbe8e21 | [
"MIT"
] | null | null | null | from pyspark.sql.types import StructField
from cishouseholds.pyspark_utils import convert_cerberus_schema_to_pyspark
def test_conversion():
    """Every cerberus key maps to a same-named pyspark StructField."""
    source = {"id": {"type": "string"}, "whole_number": {"type": "integer"}}
    converted = convert_cerberus_schema_to_pyspark(source)
    assert len(converted) == len(source)
    assert sorted(field.name for field in converted) == sorted(source.keys())
    assert all(isinstance(field, StructField) for field in converted)
| 40 | 109 | 0.783929 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.083929 |
5b78eb23d489938fa4e1d56da1cf000445c3616a | 2,954 | py | Python | bus_schedule.py | 32-52/LanitBusScheduleBot | 1f50b824c2cbd07e1855eb6bce59595b8c639e40 | [
"MIT"
] | 3 | 2019-11-05T08:15:32.000Z | 2019-11-08T05:20:32.000Z | bus_schedule.py | 32-52/LanitBusScheduleBot | 1f50b824c2cbd07e1855eb6bce59595b8c639e40 | [
"MIT"
] | 25 | 2019-11-06T08:54:09.000Z | 2021-12-13T20:25:45.000Z | bus_schedule.py | 32-52/LanitBusScheduleBot | 1f50b824c2cbd07e1855eb6bce59595b8c639e40 | [
"MIT"
] | 3 | 2019-11-02T22:20:09.000Z | 2019-11-26T09:52:43.000Z | from models import Destinations, Locations
from settings import logging
from datetime import datetime
import requests
import settings
class LanitBusInfo:
    """Builds a human-readable (Russian) shuttle-schedule message from the
    LANIT transport API at https://transport.lanit.ru."""
    @staticmethod
    def get_nearest_bus(location: Locations, destination: Destinations) -> str:
        """Return a formatted message with the nearest shuttle for the given
        metro station and direction.

        :param location: departure metro station (Locations enum)
        :param destination: direction of travel (Destinations enum)
        :return: multi-line Russian message for the user
        """
        logging.info('Getting nearest bus started...')
        # Map the station enum to the API's one-letter path segment.
        location_data = None
        if location == Locations.MARINA_ROSHHA:
            location_data = 'm'
        elif location == Locations.PLOSHHAD_ILICHA:
            location_data = 'p'
        elif location == Locations.RIZHSKAJA:
            location_data = 'r'
        destination_data = None
        if destination == Destinations.TO_METRO:
            destination_data = 'to_metro'
        elif destination == Destinations.TO_OFFICE:
            destination_data = 'to_office'
        response = requests.get(
            f'https://transport.lanit.ru/api/times/{location_data}').json()
        # Message header: current day/time, station, direction.
        message_format = f'Сейчас {settings.days[datetime.today().weekday()]} {response["info"]["now"]}\n' \
                         f'Метро: {location.value}\n' \
                         f'Куда: {destination.value}\n'
        # weekday() > 4 means Saturday/Sunday: no shuttles on weekends.
        if datetime.today().weekday() > 4:
            logging.debug(
                f'message_format {type(message_format)} = {message_format}')
            logging.info('Getting nearest bus completed')
            message_format += 'Сегодня маршруток не будет'
            return message_format
        elif response['time'][destination_data]['nearest'] is not False:
            message_format += f'Ближайшая маршрутка будет через {response["time"][destination_data]["left"]} ' \
                              f'в {response["time"][destination_data]["nearest"]}\n'
            if response["time"][destination_data]["next"] is not False:
                message_format += f'Следующая будет в {response["time"][destination_data]["next"]}\n'
            else:
                message_format += f'Маршруток больше сегодня не будет\n'
            # NOTE(review): the guard checks response['info']['warning'] but the
            # message reads response['info'][destination_data]['warning'] — the
            # keys differ; confirm against the API schema (same below).
            if response['info']['warning'] is not False:
                message_format += f"Важно: {response['info'][destination_data]['warning']}"
            logging.debug(
                f'message_format {type(message_format)} = {message_format}')
            logging.info('Getting nearest bus completed')
            return message_format
        elif response['time'][destination_data]['nearest'] is False:
            message_format += f'Сегодня маршруток не будет.\n'
            if response['info']['warning'] is not False:
                message_format += f"Предупреждение: {response['info'][destination_data]['warning']}"
            logging.debug(
                f'message_format {type(message_format)} = {message_format}')
            logging.info('Getting nearest bus completed')
            return message_format
        else:
            # NOTE(review): unreachable — 'nearest' is either 'is False' or
            # 'is not False', so the branches above are exhaustive.
            message_format = 'К сожалению не удалось получить расписание\n'
            return message_format
| 43.441176 | 112 | 0.602911 | 3,007 | 0.956425 | 0 | 0 | 2,983 | 0.948791 | 0 | 0 | 1,256 | 0.399491 |
5b79805d4767cfaf22c2d662504bc4ac42b4ebc2 | 1,808 | py | Python | test/unit/controllers/logging_api_test.py | beer-garden/brew-view | e973d490fc2a25c0c264b04c9494f64df2db283f | [
"MIT"
] | 5 | 2018-02-20T13:34:55.000Z | 2020-08-09T01:45:39.000Z | test/unit/controllers/logging_api_test.py | beer-garden/brew-view | e973d490fc2a25c0c264b04c9494f64df2db283f | [
"MIT"
] | 179 | 2018-02-06T13:11:41.000Z | 2022-03-10T13:12:15.000Z | test/unit/controllers/logging_api_test.py | beer-garden/brew-view | e973d490fc2a25c0c264b04c9494f64df2db283f | [
"MIT"
] | 1 | 2019-01-03T17:35:35.000Z | 2019-01-03T17:35:35.000Z | import unittest
import json
from . import TestHandlerBase
from mock import patch
class LoggingApiTest(TestHandlerBase):
    """HTTP-level tests for the /api/v1/config/logging endpoint."""
    def setUp(self):
        super(LoggingApiTest, self).setUp()
    @patch("brew_view.controllers.logging_api.MongoParser.serialize_logging_config")
    def test_get_config(self, serialize_mock):
        """GET returns the serialized logging config with HTTP 200."""
        serialize_mock.return_value = "serialized_logging_config"
        response = self.fetch("/api/v1/config/logging")
        self.assertEqual(200, response.code)
        self.assertEqual("serialized_logging_config", response.body.decode("utf-8"))
    # NOTE(review): the PATCH tests below patch system_api's serializer while
    # exercising the logging endpoint — presumably where the handler imports
    # it from; confirm against the brew_view.controllers module layout.
    @patch("brew_view.load_plugin_logging_config")
    @patch("brew_view.controllers.system_api.MongoParser.serialize_logging_config")
    def test_patch_reload(self, serialize_mock, load_mock):
        """PATCH 'reload' triggers load_plugin_logging_config exactly once."""
        serialize_mock.return_value = "serialized_logging_config"
        response = self.fetch(
            "/api/v1/config/logging",
            method="PATCH",
            body='{"operations": [{"operation": "reload"}]}',
            headers={"content-type": "application/json"},
        )
        self.assertEqual(200, response.code)
        self.assertEqual("serialized_logging_config", response.body.decode("utf-8"))
        self.assertEqual(load_mock.call_count, 1)
    @patch("brew_view.controllers.system_api.MongoParser.serialize_logging_config")
    def test_patch_invalid_operation(self, serialize_mock):
        """PATCH with an unknown operation is rejected with HTTP 400."""
        body = json.dumps({"operations": [{"operation": "INVALID"}]})
        serialize_mock.return_value = "serialized_logging_config"
        response = self.fetch(
            "/api/v1/config/logging",
            method="PATCH",
            body=body,
            headers={"content-type": "application/json"},
        )
        self.assertEqual(400, response.code)
if __name__ == "__main__":
unittest.main()
| 35.45098 | 84 | 0.678097 | 1,674 | 0.925885 | 0 | 0 | 1,553 | 0.85896 | 0 | 0 | 636 | 0.35177 |
5b7ae6b9736863ef24badaeecf71daf094c1c542 | 4,646 | py | Python | src/sparkcleaner/functions/string_cleaning.py | IvoWnds/sparkcleaner-git | b2acac27da1f05ed93941c59fe870b62c3539536 | [
"MIT"
] | null | null | null | src/sparkcleaner/functions/string_cleaning.py | IvoWnds/sparkcleaner-git | b2acac27da1f05ed93941c59fe870b62c3539536 | [
"MIT"
] | null | null | null | src/sparkcleaner/functions/string_cleaning.py | IvoWnds/sparkcleaner-git | b2acac27da1f05ed93941c59fe870b62c3539536 | [
"MIT"
] | null | null | null | from typing import List, Optional, Type
import pyspark.sql.functions as F
from pyspark.sql import DataFrame as SparkDataFrame
from pyspark.sql.types import DataType
import src.sparkcleaner.helpers.verify as verify
def remove_leading_zeros(df: SparkDataFrame,
                         col_name: str,
                         maintain_type: bool = True) -> SparkDataFrame:
    """Strip leading zeros from the values of ``col_name`` using a regex.

    Parameters
    ----------
    (required) df: pyspark.sql.DataFrame
        DataFrame holding the column to process
    (required) col_name: str
        column whose leading zeros are removed
    (optional) maintain_type: bool = True
        If true, the column is cast back to its pre-call type.
        If false, the column is returned as a string column.

    Returns
    ----------
    pyspark.sql.DataFrame
        the DataFrame with the column processed in place

    See Also
    ----------
    pyspark.sql.functions.regexp_replace()
    pyspark.sql.Column.cast()

    Example
    ----------
    my_df = remove_leading_zeros(my_df, "MY_COL", False)
    """
    _rlz_func_verify_input_types(df, col_name, maintain_type)
    type_before: DataType = df.schema[col_name].dataType
    stripped = F.regexp_replace(F.col(col_name), r'^[0]*', "")
    df = df.withColumn(col_name, stripped)
    return _if_maintain_type_cast_original_type(df, col_name,
                                                maintain_type, type_before)
def _rlz_func_verify_input_types(df: SparkDataFrame,
                                 col_name: str,
                                 maintain_type: bool) -> None:
    # Delegate argument-type checking to the shared verify helper; the first
    # list carries the expected types, the second the actual runtime types.
    wanted: List[type] = [SparkDataFrame, str, bool]
    actual: List[type] = [type(df), type(col_name), type(maintain_type)]
    verify.verify_func_input(wanted, actual)
def _if_maintain_type_cast_original_type(df: SparkDataFrame,
                                         col_name: str,
                                         maintain_type: bool,
                                         original_type: DataType) -> SparkDataFrame:
    # Cast the (now string) column back to its original type when requested.
    if not maintain_type:
        return df
    restored = F.col(col_name).cast(original_type).alias(col_name)
    return df.withColumn(col_name, restored)
def keep_alphanumeric_string(df: SparkDataFrame,
                             col_name: str,
                             maintain_type: bool = True,
                             keep_spaces: bool = True) -> SparkDataFrame:
    """Delete every non-alphanumeric character from ``col_name`` via regex.

    Parameters
    ----------
    (required) df: pyspark.sql.DataFrame
        DataFrame holding the column to process
    (required) col_name: str
        column to clean
    (optional) maintain_type: bool = True
        If true, cast the result back to the column's pre-call type.
        If false, the column is returned as a string column.
    (optional) keep_spaces: bool = True
        If true, spaces survive the cleaning.
        If false, spaces are removed as well.

    Returns
    ----------
    pyspark.sql.DataFrame
        the DataFrame with the column processed in place

    See Also
    ----------
    pyspark.sql.functions.regexp_replace()
    pyspark.sql.Column.cast()

    Example
    ----------
    my_df = keep_alphanumeric_string(my_df, "MY_COL", False)
    """
    _kes_func_verify_input_types(df, col_name, maintain_type, keep_spaces)
    type_before: DataType = df.schema[col_name].dataType
    cleaned = F.regexp_replace(F.col(col_name), _set_regex(keep_spaces), "")
    df = df.withColumn(col_name, cleaned)
    return _if_maintain_type_cast_original_type(df, col_name,
                                                maintain_type, type_before)
def _set_regex(keep_spaces: bool) -> str:
if keep_spaces:
regex: str = r'[^A-Za-z0-9 ]' # spaces & alphanumeric
else:
regex: str = r'[^A-Za-z0-9]' # alphanumeric
return regex
def _kes_func_verify_input_types(df: SparkDataFrame,
                                 col_name: str,
                                 maintain_type: bool,
                                 keep_spaces: bool) -> None:
    # Delegate argument-type checking to the shared verify helper.
    wanted: List[type] = [SparkDataFrame, str, bool, bool]
    actual: List[type] = [type(df), type(col_name),
                          type(maintain_type), type(keep_spaces)]
    verify.verify_func_input(wanted, actual)
| 34.932331 | 84 | 0.566939 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,607 | 0.345889 |
5b7afef62f513c7426b0ae16d60e1e1c3c3a0c71 | 1,811 | py | Python | src/unifi_api/utils/decorators.py | r4mmer/unifi_python_client | 54c89f1c66cf219a37fd7317566defbf2ef4c9ff | [
"MIT"
] | 1 | 2021-01-25T16:19:09.000Z | 2021-01-25T16:19:09.000Z | src/unifi_api/utils/decorators.py | ioannova/unifi_python_api | cd8c6549cdd14294c9747acd8b053cb911d22305 | [
"MIT"
] | null | null | null | src/unifi_api/utils/decorators.py | ioannova/unifi_python_api | cd8c6549cdd14294c9747acd8b053cb911d22305 | [
"MIT"
] | 3 | 2020-01-09T14:30:41.000Z | 2022-01-31T23:22:35.000Z | # custom decorators
from functools import wraps
import trafaret as t
from .exceptions import UnifiLoginError
from .models import JsonResponse
def call_requires_login(func):
    """Decorator: retry an API call up to 3 times, re-logging-in when the
    controller answers 401 with an api.err.LoginRequired JSON body."""
    def _usable(resp):
        # Returns None when the response is a JSON 401 "LoginRequired",
        # meaning the session expired and the call must be retried.
        if resp.status_code == 401 and 'application/json' in resp.headers.get('Content-Type'):
            payload = JsonResponse.check(resp.json())
            if payload['meta']['msg'] == 'api.err.LoginRequired':
                return None
        return resp

    @wraps(func)
    def wrapper(self, *args, **kwargs):
        from ..base_api import AbstractUnifiSession
        assert isinstance(self, AbstractUnifiSession), 'Calls must be made from an AbstractUnifiSession subclass'
        attempt = 0
        while attempt < 3:
            resp = func(self, *args, **kwargs)
            if _usable(resp) is not None:
                return resp
            self.debug('*****needs to reconnect to controller')
            self.clear_cookies()
            self.login()
            attempt += 1
        raise UnifiLoginError('Reconnection to controller failed')
    return wrapper
def requires_login(func):
    """Decorator: make sure the session is logged in before running ``func``."""
    @wraps(func)
    def wrapper(self, *args, **kwargs):
        from ..base_api import AbstractUnifiSession
        assert isinstance(self, AbstractUnifiSession), 'Must be called from an AbstractUnifiSession subclass'
        # could try sometimes to ease bad connection cases
        self.logged_in or self.login()
        return func(self, *args, **kwargs)
    return wrapper
def guard(params=None, **kwargs):
    """Build a trafaret-guard decorator from ``params`` or from keyword specs."""
    specs = t.Forward()
    specs << (t.Dict(**kwargs) if params is None else params)

    def decorate(fn):
        guarded = t.guard(specs)(fn)
        wrapped = wraps(fn)(guarded)
        wrapped.__doc__ = fn.__doc__
        return wrapped
    return decorate
| 31.224138 | 113 | 0.619547 | 0 | 0 | 0 | 0 | 964 | 0.532303 | 0 | 0 | 321 | 0.17725 |
5b7bc8ef2ba6b2dcd2a0766bba13e09db8e6d318 | 484 | py | Python | examples/another_simple_example.py | Inzilkin/vk.py | 969f01e666c877c1761c3629a100768f93de27eb | [
"MIT"
] | 2 | 2020-02-28T11:31:11.000Z | 2020-09-16T06:11:11.000Z | examples/another_simple_example.py | Inzilkin/vk.py | 969f01e666c877c1761c3629a100768f93de27eb | [
"MIT"
] | null | null | null | examples/another_simple_example.py | Inzilkin/vk.py | 969f01e666c877c1761c3629a100768f93de27eb | [
"MIT"
] | 1 | 2020-03-03T20:49:57.000Z | 2020-03-03T20:49:57.000Z | from vk import VK
from vk.utils import TaskManager
import asyncio
import logging
# Verbose logging makes the VK API traffic visible while experimenting.
logging.basicConfig(level="DEBUG")
token = "TOKEN"  # NOTE: replace with a real VK access token before running
vk = VK(access_token=token)
task_manager = TaskManager(vk.loop)
api = vk.get_api()
async def send_message():
    # Send "hello!" to peer_id=1 and print the raw API response.
    resp = await api.messages.send(peer_id=1, message="hello!", random_id=0)
    print(resp)
if __name__ == "__main__":
    task_manager.add_task(send_message)
    task_manager.run()
    task_manager.close()  # close event loop manually
5b7f91f8b133bdf24c68950699e17811ceddf1da | 5,493 | py | Python | Ambience/display/EmulatedDisplay.py | Matchstic/automated-ambience | 79fe5f7988fcfeb51c07a281e2ee6e188df4cb60 | [
"BSD-2-Clause"
] | 1 | 2018-07-13T12:03:35.000Z | 2018-07-13T12:03:35.000Z | Ambience/display/EmulatedDisplay.py | Matchstic/automated-ambience | 79fe5f7988fcfeb51c07a281e2ee6e188df4cb60 | [
"BSD-2-Clause"
] | null | null | null | Ambience/display/EmulatedDisplay.py | Matchstic/automated-ambience | 79fe5f7988fcfeb51c07a281e2ee6e188df4cb60 | [
"BSD-2-Clause"
] | null | null | null | from tkinter import Tk, Canvas
# This is an emulated display with the same API interface as for the Unicorn HAT/pHAT hardware.
# Thus, it relies upon (in part) code from: https://github.com/pimoroni/unicorn-hat/blob/master/library/UnicornHat/unicornhat.py
# Note that only the pHAT is supported, and rotation of the display is not supported.
class EmulatedGUI():
    """Tkinter view that mimics the Unicorn pHAT LED matrix on screen."""

    def __init__(self, master):
        self.master = master
        master.title("Emulated")
        self.map = []
        # One Canvas widget and one colour string per LED slot (64 total).
        self.pixels = [None] * 64
        self.pixel_colours = ["#000000"] * 64
        self.brightness = 1.0

    def setup(self, pxmap):
        """Lay out one 30x30 Canvas per pixel following the index map ``pxmap``."""
        self.map = pxmap
        for row, indices in enumerate(self.map):
            for col, index in enumerate(indices):
                cell = Canvas(self.master, width=30, height=30)
                cell.grid(row=row, column=col)
                cell.configure(background="black", highlightbackground="black", bd=1)
                self.pixels[index] = cell

    def set_pixel(self, idx, r, g, b):
        # Buffer the colour; it is pushed to the widget on the next update().
        self.pixel_colours[idx] = '#%02x%02x%02x' % (r, g, b)

    def set_brightness(self, brightness):
        self.brightness = brightness

    def update(self):
        # Push the buffered colours onto the Canvas widgets; swallow errors
        # (e.g. the window was closed) exactly like the hardware driver would.
        try:
            for colour, cell in zip(self.pixel_colours, self.pixels):
                cell.configure(background=colour)
        except:
            pass
class EmulatedDisplay():
    """Drop-in software replacement for the Unicorn pHAT driver API.

    Pixel colours are buffered in ``self.pixels``; ``setup()`` opens a Tk
    window (via EmulatedGUI) and ``show()`` pushes the buffer to it.
    Rotation is not supported (see module header comment).

    Fixes vs. the original: ``get_brightness``, ``clear`` and ``off`` were
    defined without ``self`` and crashed when called on an instance, and
    ``set_all`` called ``shade_pixels`` without ``self`` (NameError).
    """

    def __init__(self):
        self.wx = 8
        self.wy = 8
        self.map = self.PHAT
        self.pixels = [(0, 0, 0) for x in range(64)]
        self.brightness_val = 0.2
        self.is_setup = False
        self.gui = None

    # Modifed from the UnicornHAT Python library
    # Available: https://github.com/pimoroni/unicorn-hat/blob/master/library/UnicornHat/unicornhat.py
    @property
    def PHAT(self):
        # Maps (x, y) grid coordinates to linear LED indices (8 columns x 4 rows).
        return [[24, 16, 8, 0],
                [25, 17, 9, 1],
                [26, 18, 10, 2],
                [27, 19, 11, 3],
                [28, 20, 12, 4],
                [29, 21, 13, 5],
                [30, 22, 14, 6],
                [31, 23, 15, 7]]

    def set_layout(self, pixel_map):
        # Only the pHAT layout is supported; the argument is ignored.
        self.map = self.PHAT

    def setup(self):
        """Open the emulated display window and start the Tk main loop."""
        if self.is_setup == True:
            return

        # Start the GUI loop
        self.root = Tk()

        # Ensure we stay above other windows
        self.root.attributes("-topmost", True)
        self.root.configure(background='black')
        self.root.lift()

        self.gui = EmulatedGUI(self.root)
        self.gui.setup(self.map)

        self.is_setup = True

        try:
            self.root.mainloop()
        except KeyboardInterrupt:
            pass

    def get_shape(self):
        # (width, height) of the pixel map.
        return (len(self.map), len(self.map[0]))

    def rotation(self, r=0):
        # Rotation is not supported by the emulator.
        pass

    def get_rotation(self):
        return 0

    def brightness(self, b=0.2):
        self.brightness_val = b
        if self.gui is not None:
            self.gui.set_brightness(b)

    def get_brightness(self):
        # FIX: was defined without `self` and raised TypeError when called.
        return self.brightness_val

    def clear(self):
        # FIX: was defined without `self`. Resets the pixel buffer to black.
        for x in range(64):
            self.pixels[x] = (0, 0, 0)

    def off(self):
        # FIX: was defined without `self`. Blank the buffer and refresh.
        self.clear()
        self.show()

    def get_index_from_xy(self, x, y):
        """Translate (x, y) grid coordinates into a linear pixel index."""
        self.wx = len(self.map) - 1
        self.wy = len(self.map[0]) - 1

        y = (self.wy)-y

        # NOTE(review): self.rotation is a method, so these comparisons are
        # always False — consistent with rotation being unsupported here.
        if self.rotation == 90 and self.wx == self.wy:
            x, y = y, (self.wx)-x
        elif self.rotation == 180:
            x, y = (self.wx)-x, (self.wy)-y
        elif self.rotation == 270 and self.wx == self.wy:
            x, y = (self.wy)-y, x

        try:
            index = self.map[x][y]
        except IndexError:
            index = None

        return index

    def set_pixel(self, x, y, r=None, g=None, b=None):
        """Buffer a colour for pixel (x, y); no-op until setup() has run."""
        if self.is_setup is False:
            return

        if type(r) is tuple:
            r, g, b = r

        elif type(r) is str:
            try:
                # NOTE(review): COLORS is not defined anywhere in this module —
                # passing a colour name will raise NameError, not ValueError.
                r, g, b = COLORS[r.lower()]
            except KeyError:
                raise ValueError('Invalid color!')

        index = self.get_index_from_xy(x, y)
        if index is not None:
            self.pixels[index] = (r, g, b)
            self.gui.set_pixel(index, r, g, b)

    def get_pixel(self, x, y):
        index = self.get_index_from_xy(x, y)
        if index is not None:
            return self.pixels[index]

    def set_all(self, r, g=None, b=None):
        # FIX: was calling bare `shade_pixels(...)`, which is undefined.
        self.shade_pixels(lambda x, y: (r, g, b))

    def shade_pixels(self, shader):
        """Set every pixel from shader(x, y) -> (r, g, b)."""
        width, height = self.get_shape()
        for x in range(width):
            for y in range(height):
                r, g, b = shader(x, y)
                self.set_pixel(x, y, r, g, b)

    def set_pixels(self, pixels):
        self.shade_pixels(lambda x, y: pixels[y][x])

    def get_pixels(self):
        width, height = self.get_shape()
        return [[self.get_pixel(x, y) for x in range(width)] for y in range(height)]

    def show(self):
        if self.is_setup is False:
            return
        self.gui.update()
5b805582575f471d3237916e3dc521e5bb626336 | 1,033 | py | Python | TEP/lista3/C.py | GuilhermeBraz/unb-workflow | 37d680a675a87cea2ff936badf94d757393870c3 | [
"MIT"
] | null | null | null | TEP/lista3/C.py | GuilhermeBraz/unb-workflow | 37d680a675a87cea2ff936badf94d757393870c3 | [
"MIT"
] | null | null | null | TEP/lista3/C.py | GuilhermeBraz/unb-workflow | 37d680a675a87cea2ff936badf94d757393870c3 | [
"MIT"
] | null | null | null | # Argus was charged with guarding Io, which is not an ordinary cow. Io is quite an explorer, and she wanders off rather frequently, making Argus' life stressful. So the cowherd decided to construct an enclosed pasture for Io.
# There are n trees growing along the river, where Argus tends Io. For this problem, the river can be viewed as the OX axis of the Cartesian coordinate system, and the n trees as points with the y-coordinate equal to 0. There is also another tree growing in the point (0, 1).
# Argus will tie a rope around three of the trees, creating a triangular pasture. Its exact shape doesn't matter to Io, but its area is crucial to her. There may be many ways for Argus to arrange the fence, but only the ones which result in different areas of the pasture are interesting for Io. Calculate the number of different areas that her pasture may have. Note that the pasture must have nonzero area.
# Read the number of test cases, then for each case read n and the n tree
# x-coordinates.  NOTE(review): the coordinates are read but the answer
# (number of distinct triangle areas) is never computed or printed — this
# solution appears unfinished.
t = int(input())
for i in range(t):
    n = int(input())
    xs = list(map(int, input().split()))
| 114.777778 | 408 | 0.75605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 927 | 0.897386 |
5b81f231a630775497623e340d76ccd4baa322f0 | 5,463 | py | Python | pyowb/open_work_bench.py | fifoforlifo/pyowb | 33833ad564c3f2d7b72c2fa4d66f9cab10ff9a22 | [
"Apache-2.0"
] | null | null | null | pyowb/open_work_bench.py | fifoforlifo/pyowb | 33833ad564c3f2d7b72c2fa4d66f9cab10ff9a22 | [
"Apache-2.0"
] | null | null | null | pyowb/open_work_bench.py | fifoforlifo/pyowb | 33833ad564c3f2d7b72c2fa4d66f9cab10ff9a22 | [
"Apache-2.0"
] | 1 | 2021-02-14T17:12:51.000Z | 2021-02-14T17:12:51.000Z | # Python plan -> Open Workbench XML converter.
#
# Python plan defines a Work Breakdown Structure where
# tasks are dictionaries and children are defined in a list.
# Children can contain sequences, to simplify data input;
# sequenced tasks are automatically chained (dependencies).
import sys
import math
from datetime import datetime, timedelta
from .keywords import *
from .tasks import *
# Start date is a monday. End-date calculation needs to add 2 days per 5 (for weekends);
# starting on a monday simplifies calculation of the extra.
_global_start_date = datetime(year=2016, month=10, day=10)
def _insert_dependency(deps, successor, predecessor):
if successor not in deps:
deps[successor] = {}
deps[successor][predecessor] = True
def _validate_tasks(id_to_task, deps):
    """Record every dependency edge in ``deps``; warn on unknown predecessor IDs."""
    for task in id_to_task.values():
        task_id = task[ID]
        for predecessor_id in task[DEPS]:
            if predecessor_id not in id_to_task:
                msg = 'WARNING: ID={task[ID]} NAME={task[NAME]} : unknown dependency "{predecessor_id}"\n'.format(**locals())
                sys.stderr.write(msg)
            _insert_dependency(deps, task_id, predecessor_id)
def _date_as_owb_string(date):
return date.strftime('%Y-%m-%dT%H:%M:%S')
def _output_tasks_recursive(outfile, task, level):
    # Emit one <Task> element for `task`, then recurse into its children
    # (string entries inside CHILDREN are sequence markers and are skipped).
    _effort_in_days = task.get(EFFORT, 0)
    # Convert working days into calendar days: every full 5-day week adds a
    # 2-day weekend (the global start date is a Monday, so this is exact).
    _effort_in_calendar_days = _effort_in_days + math.floor((_effort_in_days - 1) / 5) * 2
    _category = parse_category(task[NAME])
    # NOTE(review): xml_escape_attr appears to return the value already
    # wrapped in quotes — name={_name} / taskID={_id} / content={_desc} are
    # emitted unquoted below while the other attributes carry their own
    # quotes; confirm in the helpers module.
    _name = xml_escape_attr(task[NAME])
    _id = xml_escape_attr(task[ID])
    _desc = xml_escape_attr(task.get(DESC, ' '))
    _level = level
    _summary = 'true' if has_children(task) else 'false'
    # All tasks share the global start; the finish is start + calendar effort.
    _start_date = _date_as_owb_string(_global_start_date)
    _end_date = _date_as_owb_string(_global_start_date + timedelta(days=_effort_in_calendar_days))
    task_xml = '''
    <Task
      category="{_category}" start="{_start_date}" finish="{_end_date}"
      proxy="false"
      critical="false" status="0" outlineLevel="{_level}" summary="{_summary}"
      milestone="false" name={_name} taskID={_id} fixed="false"
      locked="false" key="false" percComp="0.0" totalSlack="9.0" unplanned="false">
      <Notes>
        <Note
          createdBy="Unknown" createdDate="2016-10-09T05:45:21" content={_desc}/>
      </Notes>
    </Task>
'''
    formatted_task = task_xml.lstrip('\n').format(**locals())
    outfile.write(formatted_task)
    children = task.get(CHILDREN, None)
    if children:
        for child in children:
            if isinstance(child, str):
                continue
            else:
                _output_tasks_recursive(outfile, child, level+1)
def _output_tasks(outfile, plan):
    """Write the <Tasks> section by walking the plan tree from the root."""
    opening = '''
  <Tasks>
'''
    closing = '''
  </Tasks>
'''
    outfile.write(opening.lstrip('\n'))
    _output_tasks_recursive(outfile, plan, 1)
    outfile.write(closing.lstrip('\n'))
# returns dict(leaf predecessor_id, True) to be used as a set
#
# OWB ignores dependencies on non-leaf tasks; therefore we must
# recursively resolve the dependencies down to leaf nodes.
def _get_leaf_predecessor_ids(id_to_task, predecessor_id, leaf_predecessor_ids):
    """Expand ``predecessor_id`` into its leaf-task IDs.

    OWB ignores dependencies on summary tasks, so a dependency on a parent
    must be replaced by dependencies on every leaf underneath it.  Results
    accumulate into ``leaf_predecessor_ids`` (a dict used as a set).
    """
    def _collect(tid):
        task = id_to_task[tid]
        if not has_children(task):
            leaf_predecessor_ids[tid] = True
            return
        for child in task[CHILDREN]:
            if isinstance(child, str):
                continue
            _collect(child[ID])
    _collect(predecessor_id)
def _output_dependencies(outfile, id_to_task, deps):
    # Write the <Dependencies> section.  Edges whose successor is a summary
    # task are dropped (OWB ignores them); predecessor IDs are expanded to
    # their leaf tasks first.  Sorting keeps the output deterministic.
    prefix = '''
  <Dependencies>
'''
    suffix = '''
  </Dependencies>
'''
    outfile.write(prefix.lstrip('\n'))
    for successor_id,predecessor_ids in sorted(deps.items()):
        if has_children(id_to_task[successor_id]):
            continue
        leaf_predecessor_ids = {} # id:True
        for predecessor_id in predecessor_ids.keys():
            _get_leaf_predecessor_ids(id_to_task, predecessor_id, leaf_predecessor_ids)
        for leaf_predecessor_id in sorted(leaf_predecessor_ids.keys()):
            outfile.write('''    <Dependency
      predecessorID="{leaf_predecessor_id}" startFinishType="0" lag="0.0" lagType="0" successorID="{successor_id}"/>
'''.format(**locals()))
    outfile.write(suffix.lstrip('\n'))
def _output_main_file(outfile, plan):
    # Serialize the whole plan as one Open Workbench project document:
    # fixed XML header/footer around the <Tasks> and <Dependencies> sections.
    prefix = '''
<?xml version="1.0"?>
<WORKBENCH_PROJECT>
  <BaseCalendars>
    <Calendar
      name="Standard">
    </Calendar>
  </BaseCalendars>
  <Projects>
    <Project
      UID="AJO44]`-U_```!/5"LU<!```?P```0" closed="false" active="true" approved="false"
      start="2016-10-10T08:00:00" openForTimeEntry="true" format="0" trackMode="0" finish="2016-10-10T08:00:00"
      priority="10" finishImposed="false" cpmType="0" name="Project Plan" startImposed="false"
      program="false">
'''
    suffix = '''
    </Project>
  </Projects>
</WORKBENCH_PROJECT>'''
    # key = ID string, value = task dict
    id_to_task = {}
    sanitize_tasks(plan, id_to_task, add_child_dependencies=True)
    # key = successor, value = {predecessor:True}
    deps = {}
    _validate_tasks(id_to_task, deps)
    outfile.write(prefix.lstrip('\n'))
    _output_tasks(outfile, plan)
    _output_dependencies(outfile, id_to_task, deps)
    outfile.write(suffix.lstrip('\n'))
def plan_to_owb_xml(filename, plan):
    """Serialize ``plan`` as an Open Workbench project file at ``filename``."""
    outfile = open(filename, 'wt')
    try:
        _output_main_file(outfile, plan)
    finally:
        outfile.close()
| 34.14375 | 137 | 0.658796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,179 | 0.398865 |
5b8245e73f37dd605b04cea9066a60ec35f34aae | 3,123 | py | Python | utils/data_utils.py | junsu-kim97/self_improved_retro | 5bb2e641a57a0623f509dd7a006896e6a105373c | [
"MIT"
] | 9 | 2021-06-21T02:19:57.000Z | 2022-02-25T02:56:13.000Z | utils/data_utils.py | junsu-kim97/self_improved_retro | 5bb2e641a57a0623f509dd7a006896e6a105373c | [
"MIT"
] | null | null | null | utils/data_utils.py | junsu-kim97/self_improved_retro | 5bb2e641a57a0623f509dd7a006896e6a105373c | [
"MIT"
] | 1 | 2021-11-25T01:15:10.000Z | 2021-11-25T01:15:10.000Z | import rdkit.Chem as Chem
import pickle
def smi_tokenizer(smi):
    """
    Tokenize a SMILES molecule or reaction
    """
    import re
    pattern = "(\[[^\]]+]|Br?|Cl?|N|O|S|P|F|I|b|c|n|o|s|p|\(|\)|\.|=|#|-|\+|\\\\|\/|:|~|@|\?|>|\*|\$|\%[0-9]{2}|[0-9])"
    tokens = re.findall(pattern, smi)
    # Sanity check: the tokens must reassemble the exact input string.
    assert ''.join(tokens) == smi
    return ' '.join(tokens)
def canonicalize_smiles(smiles):
    """Return the canonical SMILES for ``smiles``, or '' if it cannot be parsed."""
    try:
        mol = Chem.MolFromSmiles(smiles)
    except Exception:
        # Was a bare `except:` — that also trapped KeyboardInterrupt/SystemExit.
        mol = None
    if mol is None:
        return ''
    return Chem.MolToSmiles(mol)
def canonicalize(smiles=None, smiles_list=None):
    """Return the canonicalized version of the given smiles or smiles list"""
    # Exactly one of the two arguments must be provided.
    assert (smiles is None) != (smiles_list is None)
    if smiles is not None:
        return canonicalize_smiles(smiles)
    return [canonicalize_smiles(s) for s in smiles_list]
def read_txt(file_path, detokenize=False):
    """Read lines from ``file_path``, optionally removing spaces per line.

    NOTE(review): reading stops at the first empty line, so content after
    an interior blank line is dropped — presumably the data files never
    contain blank lines; confirm before relying on this.
    """
    collected = []
    with open(file_path, "r") as handle:
        for raw in handle:
            stripped = raw.rstrip()
            if not stripped:
                break
            collected.append("".join(stripped.split(" ")) if detokenize else stripped)
    return collected
def read_file(file_path, beam_size=1, max_read=-1, parse_func=None):
    """Parse a predictions file into a list of items (or beams).

    Each line is parsed with ``parse_func``; the default strips whitespace
    and, if the result is comma-separated, keeps the first field.  Lines
    are grouped into beams of ``beam_size``; with beam_size == 1 the beams
    are flattened into plain values.

    Parameters:
        file_path:  text file to read
        beam_size:  number of consecutive lines forming one beam
        max_read:   stop after this many output entries (-1 = no limit)
        parse_func: optional callable(line) -> parsed value

    Returns:
        list of parsed values (beam_size == 1) or list of beams (lists).
    """
    output_list = []  # List of beams if beam_size is > 1 else list of smiles
    cur_beam = []  # Keep track of the current beam
    # Fixes vs. the original: open read-only (was 'r+'), use a context
    # manager (the manual close leaked the handle on exceptions), and stop
    # shadowing the function name with the file-handle variable.
    with open(file_path, 'r') as handle:
        for line in handle:
            if parse_func is None:
                parse = line.strip().replace(' ', '')  # default parse function
                if ',' in parse:
                    # If output separated by commas, return first by default
                    parse = parse.split(',')[0]
            else:
                parse = parse_func(line)
            cur_beam.append(parse)
            if len(cur_beam) == beam_size:
                output_list.append(cur_beam[0] if beam_size == 1 else cur_beam)
                if max_read != -1 and len(output_list) >= max_read:
                    break
                cur_beam = []
    return output_list
def remove_atom_mapping(smiles):
    """Strip atom-map numbers (and explicit hydrogens) from a SMILES string."""
    mol = Chem.RemoveHs(Chem.MolFromSmiles(smiles))
    for atom in mol.GetAtoms():
        atom.ClearProp('molAtomMapNumber')
    return Chem.MolToSmiles(mol)
def txt2pkl(txt_path, pkl_path):
    """Pickle a detokenized .txt file as a list of single-element lists."""
    wrapped = [[entry] for entry in read_txt(txt_path, detokenize=True)]
    with open(pkl_path, 'wb') as sink:
        pickle.dump(wrapped, sink)
if __name__ == '__main__':
    # NOTE(review): hard-coded absolute paths from the author's machine.
    txt2pkl(txt_path='/data/junsu_data/ssl-rxn/retro_smiles_transformer/dataset/schneider50k/backward/src-train.txt',
            pkl_path='/home/junsu/workspace/retro_star/biased_one_step/data/cooked_schneider50k/src-train.pkl')
| 30.028846 | 120 | 0.603907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 715 | 0.228947 |
5b85082b3ec4e8e4086dbcbe9c13c3b0a4b1548b | 3,237 | py | Python | src/scraping/newslists_scrapers/ukrnet.py | mstrechen/news-scraper | 7961cef22f3989ad48140c7695b32662f9fd57ab | [
"MIT"
] | null | null | null | src/scraping/newslists_scrapers/ukrnet.py | mstrechen/news-scraper | 7961cef22f3989ad48140c7695b32662f9fd57ab | [
"MIT"
] | null | null | null | src/scraping/newslists_scrapers/ukrnet.py | mstrechen/news-scraper | 7961cef22f3989ad48140c7695b32662f9fd57ab | [
"MIT"
] | null | null | null | from queue import Queue
from datetime import datetime, timedelta
from .INewslistScraper import INewslistScraper
from .. import article
from .. import driver
class Scraper(INewslistScraper):
    """Collects article listings from the ukr.net category pages.

    Each category URL is scrolled until either ``self.limit`` articles are
    visible or no further articles load, then the entries are pushed onto
    the supplied queue as Article objects.
    """

    def __init__(self, limit: int = 100):
        INewslistScraper.__init__(self, limit)
        # Category tag -> listing page URL.
        self._tag_to_url = {
            "politics" : "https://www.ukr.net/news/politika.html",
            "economics" : "https://www.ukr.net/news/jekonomika.html",
            "accidents" : "https://www.ukr.net/news/proisshestvija.html",
            "society" : "https://www.ukr.net/news/society.html",
            "technologies" : "https://www.ukr.net/news/tehnologii.html",
            "science" : "https://www.ukr.net/news/science.html",
            "auto" : "https://www.ukr.net/news/avto.html",
            "sport" : "https://www.ukr.net/news/sport.html",
            "health" : "https://www.ukr.net/news/zdorove.html",
            "celebrities" : "https://www.ukr.net/news/show_biznes.html",
            "global" : "https://www.ukr.net/news/za_rubezhom.html",
            "fun" : "https://www.ukr.net/news/kurezy.html",
            "photoreport" : "https://www.ukr.net/news/fotoreportazh.html",
            "video" : "https://www.ukr.net/news/video.html"
        }
        self.driver = driver.driver
        self.xpath = {
            "absolute_article_path" : '//*[@id="main"]/div/article/section'
        }
        # Ukrainian three-letter month abbreviations, index 0 = January.
        self.monthshorts = [u"січ", u"лют", u"бер", u"кві", u"тра",
                            u"чер", u"лип", u"сер", u"вер", u"жов", u"лис", u"гру"]

    def _load_more(self):
        # Scroll to the bottom so the page lazy-loads another batch of articles.
        self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    def _date_from_ukr_to_datetime(self, s: str, index: int):
        """Parse "<day> <ukr-month>" strings such as u"5 бер".

        The true publication time is unknown, so the article's list index is
        folded into fake hours/minutes (index//60 : index%60) to preserve the
        on-page ordering.  Assumes the date belongs to the current year.
        """
        mon = s[s.find(' ') + 1:]
        day = int(s[: s.find(' ')])
        return datetime(datetime.today().year, self.monthshorts.index(mon) + 1,
                        day, index // 60, index % 60)

    def _convert_datetime(self, s: str, index: int):
        """Convert a listing timestamp ("HH:MM" or "<day> <month>") to datetime."""
        s = s.strip()
        if s.find(':') != -1:
            # "HH:MM" means *today at that wall-clock time*.  The previous
            # implementation ADDED the value to the current moment as a
            # timedelta, pushing every timestamp into the future.
            h = int(s[:2])   # NOTE(review): assumes zero-padded hours — "9:30" would fail; confirm site format
            m = int(s[3:])
            return datetime.today().replace(hour=h, minute=m, second=0, microsecond=0)
        return self._date_from_ukr_to_datetime(s, index)

    def _parse_by_tag(self, tag, url, queue: Queue):
        """Scrape one category page and enqueue up to self.limit articles."""
        dr = self.driver
        dr.get(url)
        elems = dr.find_elements_by_xpath(self.xpath["absolute_article_path"])
        prev_cnt = 0
        # Keep scrolling until enough articles are loaded or the count stalls.
        while len(elems) < self.limit and len(elems) != prev_cnt:
            self._load_more()
            prev_cnt = len(elems)
            elems = dr.find_elements_by_xpath(self.xpath["absolute_article_path"])
        for e, index in zip(elems, range(self.limit)):
            dt = e.find_element_by_tag_name("time")
            dt = self._convert_datetime(dt.text, index)
            # Drill down to the anchor holding the headline and URL.
            e = e.find_element_by_tag_name("div")
            e = e.find_element_by_tag_name("div")
            link = e.find_element_by_tag_name("a")
            e_url = link.get_attribute("href")
            e_headline = link.text
            queue.put_nowait(article.Article(e_url, e_headline, dt, tags=[tag]))

    def push_articles_list(self, queue: Queue):
        for tag in self._tag_to_url:
            self._parse_by_tag(tag, self._tag_to_url[tag], queue)
5b85533c3707d4c478552c5ac09e55d8b975b426 | 201 | py | Python | inference.py | andreasr27/bidding_simulator | c2e1665b5eb72d6464025ce330682fb25780cb56 | [
"Apache-2.0"
] | null | null | null | inference.py | andreasr27/bidding_simulator | c2e1665b5eb72d6464025ce330682fb25780cb56 | [
"Apache-2.0"
] | null | null | null | inference.py | andreasr27/bidding_simulator | c2e1665b5eb72d6464025ce330682fb25780cb56 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import regret as r
import sys
import os
n=int(sys.argv[1])
fout=open(sys.argv[3],'w')
print >>fout, n
for i in range(0,n):
print >>fout, i, r.mult_valuation(sys.argv[2],i)
| 15.461538 | 56 | 0.646766 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 20 | 0.099502 |
5b858246ad28edd0c4482b11bf509db078683da1 | 12,676 | py | Python | TAR/dataset.py | jiyanggao/CTAP | 4c8ed32c72763f1b47e607b6e957695f9381930e | [
"MIT"
] | 49 | 2018-07-11T06:33:39.000Z | 2022-02-24T04:43:32.000Z | TAR/dataset.py | jiyanggao/CTAP | 4c8ed32c72763f1b47e607b6e957695f9381930e | [
"MIT"
] | 10 | 2018-07-31T07:01:53.000Z | 2021-01-23T11:16:49.000Z | TAR/dataset.py | jiyanggao/CTAP | 4c8ed32c72763f1b47e607b6e957695f9381930e | [
"MIT"
] | 12 | 2018-07-21T10:30:04.000Z | 2021-07-12T00:32:34.000Z |
import numpy as np
from math import sqrt
import os
import random
import pickle
def calculate_IoU(i0,i1):
    # Temporal IoU of two (start, end) intervals.
    # NOTE(review): disjoint intervals are not clamped, so the value goes
    # negative when there is no overlap — callers appear to use it only in
    # threshold comparisons, but confirm before reuse.
    inter_lo = max(i0[0], i1[0])
    inter_hi = min(i0[1], i1[1])
    union_lo = min(i0[0], i1[0])
    union_hi = max(i0[1], i1[1])
    return 1.0 * (inter_hi - inter_lo) / (union_hi - union_lo)
'''
A class that handles the training set
'''
class TrainingDataSet(object):
def __init__(self,flow_feat_dir,appr_feat_dir,clip_gt_path,background_path,batch_size,movie_length_info,ctx_num,central_num, unit_feature_size,unit_size,
pos_neg_ratio=10.0):
#it_path: image_token_file path
self.ctx_num=ctx_num
self.unit_feature_size=unit_feature_size
self.unit_size=unit_size
self.batch_size=batch_size
self.movie_length_info=movie_length_info
self.visual_feature_dim=self.unit_feature_size
self.flow_feat_dir=flow_feat_dir
self.appr_feat_dir=appr_feat_dir
self.training_samples=[]
self.central_num=central_num
print "Reading training data list from "+clip_gt_path+" and "+background_path
db_size = 0
with open(clip_gt_path) as f:
db_size += len(f.readlines())
with open(background_path) as f:
db_size += len(f.readlines())
with open(clip_gt_path) as f:
for l in f:
movie_name=l.rstrip().split(" ")[0]
clip_start=float(l.rstrip().split(" ")[1])
clip_end=float(l.rstrip().split(" ")[2])
gt_start=float(l.rstrip().split(" ")[3])
gt_end=float(l.rstrip().split(" ")[4])
round_gt_start=np.round(gt_start/unit_size)*self.unit_size+1
round_gt_end=np.round(gt_end/unit_size)*self.unit_size+1
self.training_samples.append((movie_name,clip_start,clip_end,gt_start,gt_end,round_gt_start,round_gt_end,1))
print str(len(self.training_samples))+" training samples are read"
positive_num=len(self.training_samples)*1.0
with open(background_path) as f:
for l in f:
# control the ratio between background samples and positive samples to be 10:1
if random.random()>pos_neg_ratio*positive_num/db_size: continue
movie_name=l.rstrip().split(" ")[0]
clip_start=float(l.rstrip().split(" ")[1])
clip_end=float(l.rstrip().split(" ")[2])
self.training_samples.append((movie_name,clip_start,clip_end,0,0,0,0,0))
self.num_samples=len(self.training_samples)
print str(len(self.training_samples))+" training samples are read"
def calculate_regoffset(self,clip_start,clip_end,round_gt_start,round_gt_end):
start_offset=(round_gt_start-clip_start)/self.unit_size
end_offset=(round_gt_end-clip_end)/self.unit_size
return start_offset, end_offset
'''
Get the central features
'''
def get_pooling_feature(self,flow_feat_dir,appr_feat_dir,movie_name,start,end):
swin_step=self.unit_size
all_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
current_pos=start
while current_pos<end:
swin_start=current_pos
swin_end=swin_start+swin_step
flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
flow_feat=flow_feat/np.linalg.norm(flow_feat)
appr_feat=appr_feat/np.linalg.norm(appr_feat)
#feat=flow_feat
feat=np.hstack((flow_feat,appr_feat))
all_feat=np.vstack((all_feat,feat))
current_pos+=swin_step
pool_feat=all_feat
return pool_feat
    '''
    Get the past (on the left of the central unit) context features
    '''
    def get_left_context_feature(self,flow_feat_dir,appr_feat_dir,movie_name,start,end):
        # Collect up to ctx_num unit features around the clip's START
        # boundary: ctx_num/2 units walking backwards from `start`, then
        # ctx_num/2 units walking forwards from `start` (the first units
        # inside the clip).  Units whose .npy file is missing are skipped.
        # NOTE(review): self.ctx_num/2 relies on Python 2 integer division.
        swin_step=self.unit_size
        all_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
        count=0
        current_pos=start
        context_ext=False
        while count<self.ctx_num/2:
            swin_start=current_pos-swin_step
            swin_end=current_pos
            if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
                flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                # L2-normalize each stream before concatenating them.
                flow_feat=flow_feat/np.linalg.norm(flow_feat)
                appr_feat=appr_feat/np.linalg.norm(appr_feat)
                #feat=flow_feat
                feat=np.hstack((flow_feat,appr_feat))
                all_feat=np.vstack((all_feat,feat))
                context_ext=True
            current_pos-=swin_step
            count+=1
        count=0
        current_pos=start
        while count<self.ctx_num/2:
            swin_start=current_pos
            swin_end=current_pos+swin_step
            if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
                flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                flow_feat=flow_feat/np.linalg.norm(flow_feat)
                appr_feat=appr_feat/np.linalg.norm(appr_feat)
                #feat=flow_feat
                feat=np.hstack((flow_feat,appr_feat))
                all_feat=np.vstack((all_feat,feat))
                context_ext=True
            current_pos+=swin_step
            count+=1
        # When every lookup failed, fall back to an empty [0, dim] array.
        if context_ext:
            pool_feat=all_feat
        else:
            # print "no left "+str(start)
            pool_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
        #print pool_feat.shape
        return pool_feat
    '''
    Get the future (on the right of the central unit) context features
    '''
    def get_right_context_feature(self,flow_feat_dir,appr_feat_dir,movie_name,start,end):
        # Mirror image of get_left_context_feature, around the clip's END
        # boundary: ctx_num/2 units walking forwards from `end`, then
        # ctx_num/2 units walking backwards from `end` (the last units
        # inside the clip).  Units whose .npy file is missing are skipped.
        # NOTE(review): self.ctx_num/2 relies on Python 2 integer division.
        swin_step=self.unit_size
        all_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
        count=0
        current_pos=end
        context_ext=False
        while count<self.ctx_num/2:
            swin_start=current_pos
            swin_end=current_pos+swin_step
            if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
                flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                # L2-normalize each stream before concatenating them.
                flow_feat=flow_feat/np.linalg.norm(flow_feat)
                appr_feat=appr_feat/np.linalg.norm(appr_feat)
                #feat=flow_feat
                feat=np.hstack((flow_feat,appr_feat))
                all_feat=np.vstack((all_feat,feat))
                context_ext=True
            current_pos+=swin_step
            count+=1
        count=0
        current_pos=end
        while count<self.ctx_num/2:
            swin_start=current_pos-swin_step
            swin_end=current_pos
            if os.path.exists(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy"):
                flow_feat=np.load(flow_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                appr_feat=np.load(appr_feat_dir+movie_name+".mp4"+"_"+str(swin_start)+"_"+str(swin_end)+".npy")
                flow_feat=flow_feat/np.linalg.norm(flow_feat)
                appr_feat=appr_feat/np.linalg.norm(appr_feat)
                #feat=flow_feat
                feat=np.hstack((flow_feat,appr_feat))
                all_feat=np.vstack((all_feat,feat))
                context_ext=True
            current_pos-=swin_step
            count+=1
        # When every lookup failed, fall back to an empty [0, dim] array.
        if context_ext:
            pool_feat=all_feat
        else:
            # print "no right "+str(end)
            pool_feat=np.zeros([0,self.unit_feature_size],dtype=np.float32)
        #print pool_feat.shape
        return pool_feat
def sample_to_number(self,all_feats,num):
    """Resample a feature matrix to exactly `num` rows.

    :param all_feats: 2-D array of shape (n, feat_dim).
    :param num: number of rows required in the output.
    :return: all-zero (num, feat_dim) array when `all_feats` is empty;
        `all_feats` itself when it already has `num` rows; otherwise `num`
        evenly strided rows of `all_feats` (stride = n // num).
    """
    sampled_feats=np.zeros([num,all_feats.shape[1]],dtype=np.float32)
    if all_feats.shape[0]==0: return sampled_feats
    if all_feats.shape[0]==num: return all_feats
    else:
        # Explicit floor division: the original `shape[0]/num*k` relied on
        # Python 2 integer division and produces a float (an invalid index)
        # under Python 3. `//` behaves identically on both.
        stride = all_feats.shape[0] // num
        for k in range(num):
            sampled_feats[k]=all_feats[stride * k,:]
        return sampled_feats
def next_batch(self):
    """Assemble one random training batch.

    Samples `batch_size` distinct indices from the training set and, for
    each one, loads the central clip features plus left/right context
    features, resampled to fixed sizes via sample_to_number().

    :return: (central_batch, left_batch, right_batch, label_batch,
        offset_batch) where label is 1 for positive samples and 0 for
        background, and offsets are the (start, end) regression targets
        (zeros for background samples).
    """
    random_batch_index=random.sample(range(self.num_samples),self.batch_size)
    central_batch=np.zeros([self.batch_size,self.central_num, self.visual_feature_dim])
    left_batch=np.zeros([self.batch_size, self.ctx_num, self.visual_feature_dim])
    right_batch=np.zeros([self.batch_size, self.ctx_num, self.visual_feature_dim])
    label_batch=np.zeros([self.batch_size],dtype=np.int32)
    offset_batch=np.zeros([self.batch_size,2],dtype=np.float32)
    index=0
    while index < self.batch_size:
        k=random_batch_index[index]
        # training_samples[k] layout (as used here): [0]=movie name,
        # [1]=clip start, [2]=clip end, [5]/[6]=rounded ground-truth
        # start/end, [7]=1 for a positive sample, else background.
        movie_name=self.training_samples[k][0]
        if self.training_samples[k][7]==1:
            # Positive sample: compute regression offsets against the
            # rounded ground-truth boundaries.
            clip_start=self.training_samples[k][1]
            clip_end=self.training_samples[k][2]
            round_gt_start=self.training_samples[k][5]
            round_gt_end=self.training_samples[k][6]
            start_offset,end_offset=self.calculate_regoffset(clip_start,clip_end,round_gt_start,round_gt_end)
            featmap=self.get_pooling_feature(self.flow_feat_dir, self.appr_feat_dir, movie_name,clip_start,clip_end)
            left_feat=self.get_left_context_feature(self.flow_feat_dir, self.appr_feat_dir, movie_name,clip_start,clip_end)
            right_feat=self.get_right_context_feature(self.flow_feat_dir, self.appr_feat_dir, movie_name,clip_start,clip_end)
            # Resample central/context features to fixed row counts so
            # they fit the pre-allocated batch tensors.
            featmap=self.sample_to_number(featmap,self.central_num)
            right_feat=self.sample_to_number(right_feat,self.ctx_num)
            left_feat=self.sample_to_number(left_feat,self.ctx_num)
            central_batch[index,:,:]=featmap
            left_batch[index,:,:]=left_feat
            right_batch[index,:,:]=right_feat
            label_batch[index]=1
            offset_batch[index,0]=start_offset
            offset_batch[index,1]=end_offset
            #print str(clip_start)+" "+str(clip_end)+" "+str(round_gt_start)+" "+str(round_gt_end)+" "+str(start_offset)+" "+str(end_offset)
            index+=1
        else:
            # Background sample: label 0 and zero regression offsets.
            clip_start=self.training_samples[k][1]
            clip_end=self.training_samples[k][2]
            left_feat=self.get_left_context_feature(self.flow_feat_dir, self.appr_feat_dir,movie_name,clip_start,clip_end)
            right_feat=self.get_right_context_feature(self.flow_feat_dir, self.appr_feat_dir,movie_name,clip_start,clip_end)
            featmap=self.get_pooling_feature(self.flow_feat_dir, self.appr_feat_dir,movie_name,clip_start,clip_end)
            featmap=self.sample_to_number(featmap,self.central_num)
            right_feat=self.sample_to_number(right_feat,self.ctx_num)
            left_feat=self.sample_to_number(left_feat,self.ctx_num)
            central_batch[index,:,:]=featmap
            left_batch[index,:,:]=left_feat
            right_batch[index,:,:]=right_feat
            label_batch[index]=0
            offset_batch[index,0]=0
            offset_batch[index,1]=0
            index+=1
    return central_batch, left_batch, right_batch, label_batch,offset_batch
'''
A class that handles the test set
'''
class TestingDataSet(object):
    """Holds the sliding-window test clips for evaluation.

    Each line of `test_clip_path` is "<movie_name> <clip_start> <clip_end>";
    the parsed tuples are collected in `self.test_samples`.
    """

    def __init__(self, flow_feat_dir, appr_feat_dir, test_clip_path, batch_size, ctx_num):
        self.ctx_num = ctx_num
        # il_path: image_label_file path
        self.batch_size = batch_size
        self.flow_feat_dir = flow_feat_dir
        self.appr_feat_dir = appr_feat_dir
        # Parenthesized print is valid under both Python 2 and Python 3;
        # the original bare print statements were Python 2 only.
        print("Reading testing data list from " + test_clip_path)
        self.test_samples = []
        with open(test_clip_path) as f:
            for l in f:
                # Split each line once instead of three times.
                fields = l.rstrip().split(" ")
                movie_name = fields[0]
                clip_start = float(fields[1])
                clip_end = float(fields[2])
                self.test_samples.append((movie_name, clip_start, clip_end))
        self.num_samples = len(self.test_samples)
        print("test clips number: " + str(len(self.test_samples)))
| 46.602941 | 157 | 0.631982 | 12,301 | 0.970417 | 0 | 0 | 0 | 0 | 0 | 0 | 1,174 | 0.092616 |
5b862b0444a554f17c428be0cfd30de1d4fc4238 | 1,209 | py | Python | github_status/util.py | alfredodeza/github-status | 6790528caef688c4624a998ba14389081188eab7 | [
"MIT"
] | null | null | null | github_status/util.py | alfredodeza/github-status | 6790528caef688c4624a998ba14389081188eab7 | [
"MIT"
] | null | null | null | github_status/util.py | alfredodeza/github-status | 6790528caef688c4624a998ba14389081188eab7 | [
"MIT"
] | null | null | null | from __future__ import print_function
import os
def build_is_triggered():
    """
    If a build is being triggered via Github directly (either by a comment,
    or automatically) then the ``ghprb`` plugin will probably be involved.
    When that is the case, the plugin injects a wealth of environment
    variables, which can tell us if the build is really being handled by
    the plugin.
    """
    required = (
        'ghprbActualCommit',
        'ghprbTriggerAuthor',
        'ghprbTargetBranch',
        'ghprbTriggerAuthorLogin',
        'ghprbCredentialsId',
        'ghprbGhRepository',
    )
    # all() applies truthiness itself, so no explicit bool() is needed;
    # a missing or empty variable makes the whole check False.
    return all(os.environ.get(name, False) for name in required)
def construct_url():
    """
    Helper to join the different parts of Github's API url to be able to
    post the notification status.

    Reads ``GITHUB_REPOSITORY`` (e.g. ``owner/repo``, possibly wrapped in
    quotes) and ``GITHUB_SHA`` from the environment.

    :return: the statuses endpoint for the current commit, e.g.
        ``https://api.github.com/repos/owner/repo/statuses/<sha>``
    """
    GITHUB_REPOSITORY = os.getenv('GITHUB_REPOSITORY')
    GITHUB_SHA = os.getenv('GITHUB_SHA')
    base_url = "https://api.github.com/repos/"
    # Strip surrounding slashes first, then surrounding quotes (that is the
    # order the decorations appear in the raw environment value).
    repository = GITHUB_REPOSITORY.strip('/').strip('"')
    # Join with '/' explicitly: os.path.join would insert '\\' on Windows,
    # which is wrong for URLs.
    full_url = '/'.join([base_url.rstrip('/'), repository, 'statuses', GITHUB_SHA])
    print('request url: %s' % full_url)
    return full_url
| 34.542857 | 78 | 0.699752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 645 | 0.533499 |
5b8868ab4678426738010cc86dd5157444746730 | 339 | py | Python | tests/__init__.py | jfardello/dyn53 | bf40f9f979273cfcb64817ae8117992ce03acbac | [
"MIT"
] | null | null | null | tests/__init__.py | jfardello/dyn53 | bf40f9f979273cfcb64817ae8117992ce03acbac | [
"MIT"
] | null | null | null | tests/__init__.py | jfardello/dyn53 | bf40f9f979273cfcb64817ae8117992ce03acbac | [
"MIT"
] | null | null | null | import unittest
from . import test_cli, test_client
def suite():
    """Aggregate the CLI and client test cases into a single test suite."""
    loader = unittest.TestLoader()
    test_suite = unittest.TestSuite()
    # unittest.makeSuite() is deprecated (and removed in Python 3.13);
    # TestLoader.loadTestsFromTestCase() is the supported equivalent.
    test_suite.addTests(loader.loadTestsFromTestCase(test_cli.TestCli))
    test_suite.addTests(loader.loadTestsFromTestCase(test_client.TestClient))
    return test_suite
# Allow running this package's suite directly: `python tests/__init__.py`.
if __name__ == '__main__':
    unittest.TextTestRunner(verbosity=2).run(suite())
| 26.076923 | 67 | 0.761062 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 | 0.029499 |
5b88fe4835697ffe0c79e926b04e81fcc84ab9fd | 111 | py | Python | OLD.dir/module2.py | romchegue/Python | 444476088e64d5da66cb00174f3d1d30ebbe38f6 | [
"bzip2-1.0.6"
] | null | null | null | OLD.dir/module2.py | romchegue/Python | 444476088e64d5da66cb00174f3d1d30ebbe38f6 | [
"bzip2-1.0.6"
] | null | null | null | OLD.dir/module2.py | romchegue/Python | 444476088e64d5da66cb00174f3d1d30ebbe38f6 | [
"bzip2-1.0.6"
] | null | null | null | print('starting to load...')
import sys
# Demo module: every top-level statement below executes exactly once, at
# first import. `func` and `klass` are intentionally empty placeholders.
name = 42
def func(): pass
class klass: pass
print('done loading.')
| 11.1 | 28 | 0.675676 | 17 | 0.153153 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.324324 |
5b89fd1aed3678be46b04253e3670d3d318222c3 | 5,053 | py | Python | src/real_q_voter/visualization.py | robertjankowski/real-q-voter | 250357fe3d504a79b8515c278d052aac04a22585 | [
"MIT"
] | null | null | null | src/real_q_voter/visualization.py | robertjankowski/real-q-voter | 250357fe3d504a79b8515c278d052aac04a22585 | [
"MIT"
] | 4 | 2020-03-27T20:08:15.000Z | 2020-06-04T07:35:18.000Z | src/real_q_voter/visualization.py | robertjankowski/real-q-voter | 250357fe3d504a79b8515c278d052aac04a22585 | [
"MIT"
] | null | null | null | import os
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import glob
from src.real_q_voter.logger import get_logger
from itertools import accumulate
from PIL import Image
logger = get_logger('REAL-Q-VOTER-VISUALIZATION-LOGGER')
def plot_degree_distribution(g: nx.Graph, bins=50, filename=None, file_extension='png'):
    """
    Plot the degree distribution of `g` on log-log axes.

    :param g: nx.Graph (directed or undirected)
    :param bins: number of histogram bins
    :param filename: name of the output figure; shown interactively if None
    :param file_extension: extension of the output plot
    """
    fig, ax = plt.subplots()
    if nx.is_directed(g):
        in_deg, out_deg = _extract_degrees_from_graph(g, is_directed=True)
        in_counts, in_edges = np.histogram(in_deg, bins=bins, density=True)
        out_counts, out_edges = np.histogram(out_deg, bins=bins, density=True)
        ax.loglog(in_edges[:-1], in_counts, label='in_degree')
        ax.loglog(out_edges[:-1], out_counts, label='out_degree')
        ax.legend()
    else:
        counts, edges = np.histogram(_extract_degrees_from_graph(g), bins=bins, density=True)
        ax.loglog(edges[:-1], counts)
    ax.set_xlabel('k')
    ax.set_ylabel('P(k)')
    # Draw all tick marks pointing inwards.
    for which in ("minor", "major"):
        ax.tick_params(which=which, axis="x", direction="in")
        ax.tick_params(which=which, axis="y", direction="in")
    if filename:
        _save_plot_as('../../figures/' + filename, file_extension)
    else:
        plt.show()
def plot_network(g: nx.Graph, title='', show_opinion=False, filename=None, file_extension='png', **plot_parameters):
    """
    Plot `g` with a fixed spring layout (seed=42).

    :param g: nx.Graph
    :param title: title of the graph
    :param show_opinion: color nodes red (opinion == 1) or blue (otherwise)
        based on the 'opinion' node attribute
    :param filename: name of the output figure; also saved under
        ../../figures/ when given
    :param file_extension: extension of the output plot
    :param plot_parameters: extra keyword arguments forwarded to nx.draw()
    """
    opinions = None
    if show_opinion:
        opinions = np.array(list(nx.get_node_attributes(g, 'opinion').values()))
        opinions = ['red' if opinion == 1 else 'blue' for opinion in opinions]
    nx.draw(g, pos=nx.spring_layout(g, seed=42), node_color=opinions,
            node_size=30, edge_color=[0, 0, 0, 0.2], alpha=0.6,
            cmap=plt.cm.jet, **plot_parameters)
    plt.title(title)
    if filename:
        base_path = '../../figures/'
        _create_folders(base_path, filename)
        _save_plot_as(base_path + filename, file_extension)
        plt.close()
    # NOTE(review): plt.show() runs unconditionally, even right after
    # plt.close() in the save branch (unlike the sibling plot functions,
    # which only show when no filename is given) — confirm this is intended.
    plt.show()
def plot_mean_opinion_independence_factor(p_range: list, mean_opinions: list, weighted_mean_opinions: list,
                                          filename=None, file_extension='png'):
    """
    Plot mean opinion (plain and weighted) against the independence factor `p`.

    :param p_range: values of the independence factor `p`
    :param mean_opinions: list of lists of mean opinions, one inner list per `p`
    :param weighted_mean_opinions: list of lists of weighted mean opinions
    :param filename: name of the output figure; shown interactively if None
    :param file_extension: extension of the output plot
    """
    # Average each inner list down to a single point per `p` value.
    averaged = list(map(np.mean, mean_opinions))
    weighted_averaged = list(map(np.mean, weighted_mean_opinions))
    plt.plot(p_range, averaged, label='mean opinion')
    plt.plot(p_range, weighted_averaged, label='weighted mean opinion')
    plt.xlabel('p')
    plt.ylabel(r'$\left<s\right>$')
    plt.legend()
    if filename:
        _save_plot_as('../../figures/' + filename, file_extension)
    else:
        plt.show()
def convert_images_to_gif(input_path: str, output_name: str):
    """
    Assemble the images matched by `input_path` into an animated GIF.

    :param input_path: glob pattern for the input frames, e.g. `../figures/*.png`
    :param output_name: output gif name (without extension), written into the
        same directory as the frames
    """
    directory = '/'.join(input_path.split('/')[:-1])
    output_path = directory + '/' + output_name + '.gif'
    # Frames are appended in lexicographic filename order; unpacking raises
    # ValueError when the pattern matches nothing.
    first, *rest = [Image.open(name) for name in sorted(glob.glob(input_path))]
    first.save(fp=output_path, format='GIF', append_images=rest,
               save_all=True, duration=400, loop=0)
def _create_folders(base_path: str, filename: str):
folders_names = filename.split('/')[:-1]
folders_names = [folder + '/' for folder in folders_names]
folders_names = list(accumulate(folders_names))
folders = [base_path + folder for folder in folders_names]
for folder in folders:
if not os.path.exists(folder):
os.makedirs(folder)
def _extract_degrees_from_graph(g: nx.Graph, is_directed=False):
    """
    Pull the numeric degree values out of a graph's (node, degree) views.

    :param g: graph to read degrees from
    :param is_directed: when True return `(in_degrees, out_degrees)`,
        otherwise a single list of degrees
    """
    if not is_directed:
        return [degree for _, degree in g.degree]
    in_degrees = [degree for _, degree in g.in_degree]
    out_degrees = [degree for _, degree in g.out_degree]
    return in_degrees, out_degrees
def _save_plot_as(filename: str, extension: str):
    """Save the current matplotlib figure, then close it.

    :param filename: output path without extension
    :param extension: 'png' (tight bbox, 400 dpi) or 'pdf'; anything else
        is logged as an error and nothing is written
    """
    # Compare with `==`, not `is`: string identity comparison only works by
    # accident of CPython interning and raises a SyntaxWarning on modern
    # interpreters.
    if extension == 'png':
        plt.savefig(filename + '.png', bbox_inches='tight', dpi=400)
    elif extension == 'pdf':
        plt.savefig(filename + '.pdf')
    else:
        logger.error('Cannot save plot. Unsupported extension')
    plt.close()
| 35.836879 | 116 | 0.661191 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,382 | 0.273501 |
5b8a08f2aa18f87d81614f59e6870e3dfaeef983 | 3,245 | py | Python | tests/entity/test_entity_validate_implementation.py | MacHu-GWU/crawlib-project | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | [
"MIT"
] | 1 | 2020-06-19T09:45:20.000Z | 2020-06-19T09:45:20.000Z | tests/entity/test_entity_validate_implementation.py | MacHu-GWU/crawlib-project | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | [
"MIT"
] | 1 | 2019-12-27T18:41:21.000Z | 2019-12-27T18:41:21.000Z | tests/entity/test_entity_validate_implementation.py | MacHu-GWU/crawlib-project | b2963b7f6a36ee7f1ef95a6bf9d8cb746d9da991 | [
"MIT"
] | 1 | 2018-08-22T01:27:32.000Z | 2018-08-22T01:27:32.000Z | # -*- coding: utf-8 -*-
import pytest
from pytest import raises
from crawlib.entity.base import Entity, RelationshipConfig, Relationship
def test_validate_implementation():
    """Check that Entity validation reports each missing piece in order.

    Each `class Country(Country)` below shadows the previous definition,
    adding exactly one required method/attribute, so the next validation
    call must complain about the *next* missing item.
    """
    # validate abstract method
    class Country(Entity):
        pass
    with raises(NotImplementedError) as e:
        Country._validate_abstract_methods()
    assert "Entity.make_test_entity" in str(e)
    class Country(Country):
        @classmethod
        def make_test_entity(cls):
            return cls()
    with raises(NotImplementedError) as e:
        Country._validate_abstract_methods()
    assert "Entity.build_url" in str(e)
    class Country(Country):
        def build_url(self):
            return "http://www.example.com/{}".format(self.id)
    with raises(NotImplementedError) as e:
        Country._validate_abstract_methods()
    assert "Entity.build_request" in str(e)
    class Country(Country):
        def build_request(self, url, **kwargs):
            return url
    with raises(NotImplementedError) as e:
        Country._validate_abstract_methods()
    assert "Entity.send_request" in str(e)
    class Country(Country):
        def send_request(self, request, **kwargs):
            return "<html>Hello World</html>"
    with raises(NotImplementedError) as e:
        Country._validate_abstract_methods()
    assert "Entity.parse_response" in str(e)
    class Country(Country):
        def parse_response(self, url, request, response, **kwargs):
            return {"data": None}
    with raises(NotImplementedError) as e:
        Country._validate_abstract_methods()
    assert "Entity.process_pr" in str(e)
    # validate configuration
    class Country(Country):
        def process_pr(self, pres, **kwargs):
            pass
    with raises(NotImplementedError) as e:
        Country._validate_configuration()
    assert "CONF_STATUS_KEY" in str(e)
    class Country(Country):
        CONF_STATUS_KEY = "status"
    with raises(NotImplementedError) as e:
        Country._validate_configuration()
    assert "CONF_EDIT_AT_KEY" in str(e)
    class Country(Country):
        CONF_EDIT_AT_KEY = "edit_at"
        # _validate_orm_related is stubbed out so the final full validation
        # does not require a real ORM backend.
        @classmethod
        def _validate_orm_related(cls):
            pass
    # With everything in place, full validation must pass without raising.
    Country.validate_implementation()
def test_validate_relationship_config_goodcase1():
    """A two-level one-to-many chain (Country -> State -> Zipcode) validates.

    The third Relationship argument must name an existing attribute of the
    parent class (e.g. "n_state" on Country) that counts the children.
    """
    class Country(Entity):
        n_state = "n_state_field"
    class State(Entity):
        n_zipcode = "n_zipcode_field"
    class Zipcode(Entity): pass
    Country.CONF_RELATIONSHIP = RelationshipConfig([
        Relationship(State, Relationship.Option.many, "n_state"),
    ])
    State.CONF_RELATIONSHIP = RelationshipConfig([
        Relationship(Zipcode, Relationship.Option.many, "n_zipcode"),
    ])
    # Must not raise.
    Entity._validate_relationship_config()
def test_validate_relationship_config_goodcase2():
    """A one-to-one relationship needs no child-count attribute (None)."""
    class ImagePage(Entity):
        id = "image_page_id"
    class ImageDownload(Entity):
        id = "image_page_id"
    ImagePage.CONF_RELATIONSHIP = RelationshipConfig([
        Relationship(ImageDownload, Relationship.Option.one, None),
    ])
    # Must not raise.
    Entity._validate_relationship_config()
# Run this module's tests with pytest when executed as a script.
if __name__ == "__main__":
    import os
    basename = os.path.basename(__file__)
    pytest.main([basename, "-s", "--tb=native"])
| 26.169355 | 72 | 0.674884 | 1,127 | 0.347304 | 0 | 0 | 141 | 0.043451 | 0 | 0 | 421 | 0.129738 |
5b8b9e25ca66bf56d38fa883adc8a97a2e94522f | 3,307 | py | Python | subt/dummy_darpa_server.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 12 | 2017-02-16T10:22:59.000Z | 2022-03-20T05:48:06.000Z | subt/dummy_darpa_server.py | m3d/osgar_archive_2020 | 556b534e59f8aa9b6c8055e2785c8ae75a1a0a0e | [
"MIT"
] | 618 | 2016-08-30T04:46:12.000Z | 2022-03-25T16:03:10.000Z | subt/dummy_darpa_server.py | robotika/osgar | 6f4f584d5553ab62c08a1c7bb493fefdc9033173 | [
"MIT"
] | 11 | 2016-08-27T20:02:55.000Z | 2022-03-07T08:53:53.000Z | #!/usr/bin/env python
"""
Dummy DARPA scoring server
"""
import os
import sys
import csv
import math
import json
import logging
from collections import defaultdict
from http.server import BaseHTTPRequestHandler, HTTPServer
from mimetypes import guess_type
g_logger = logging.getLogger(__name__)
def dist3d(xyz, xyz2):
    """Return the Euclidean distance between two same-length point tuples."""
    # Generator expression avoids materializing an intermediate list.
    return math.sqrt(sum((a - b) ** 2 for a, b in zip(xyz, xyz2)))
class GameLogic:
    """Ground-truth artifact positions plus the running score."""

    def __init__(self, filename):
        """Load artifacts from a CSV file with rows: name, x, y, z."""
        self.score = 0
        self.artf = defaultdict(list)
        with open(filename) as csvfile:
            for row in csv.reader(csvfile):
                if 'artf' in row:
                    # header row
                    continue
                name = row[0]
                self.artf[name].append(tuple(float(value) for value in row[1:]))

    def report_artf(self, artf, xyz):
        """Score a report: consume the closest matching artifact within 5 m.

        Returns True (and increments the score) on a hit, False otherwise.
        """
        if artf not in self.artf:
            return False
        remaining = self.artf[artf]
        if not remaining:
            return False
        # min() keeps the first of equally-close candidates, matching the
        # strict `<` comparison of the original scan.
        closest = min(remaining, key=lambda pos: dist3d(xyz, pos))
        if dist3d(xyz, closest) < 5.0:  # DARPA 5m limit
            remaining.remove(closest)
            self.score += 1
            return True
        return False
class MyHandler(BaseHTTPRequestHandler):
    """Minimal HTTP handler emulating the DARPA scoring endpoints.

    The owning HTTPServer must carry a `game_logic` attribute (a GameLogic
    instance) — see main().
    """

    def do_GET(self):
        # Any GET returns the current status JSON with the live score; only
        # the score field is dynamic, the rest is canned.
        g_logger.info(f"GET: {self.path}")
        s = self.path.split('/')
        g_logger.info(str(s))
        self.send_response(200)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        self.wfile.write(b'{"score":%d,"remaining_reports":97,"current_team":"robotika","run_clock":1502.8}' % self.server.game_logic.score)

    def do_POST(self):
        # Accepts a JSON artifact report and feeds it to the game logic.
        g_logger.info(f"POST: {self.path}")
        s = self.path.split('/')
        # The real API posts to .../artifact_reports/ with a JSON body.
        assert self.headers['Content-Type'] == 'application/json', self.headers['Content-Type']
        assert 'artifact_reports' in s, s
        size = int(self.headers['Content-Length'])
        data = self.rfile.read(size)
        g_logger.info(f'DATA {data}')
        d = json.loads(data)
        self.server.game_logic.report_artf(d['type'], (d['x'], d['y'], d['z']))
        self.send_response(201)
        self.send_header("Content-Type", "application/json")
        self.end_headers()
        # Canned acknowledgement body (values are not derived from the report).
        self.wfile.write(b'{"url":"http://10.100.2.200:8000/api/reports/3/","id":3,"x":1.0,"y":2.0,"z":4.0,"type":"Cell Phone","submitted_datetime":"2020-02-18T22:40:05.009145+00:00","run_clock":1505.0,"team":"robotika","run":"0.0.2","report_status":"scored","score_change":1}')
def main():
    """Parse arguments, configure logging, and run the dummy scoring server."""
    import argparse
    logging.basicConfig(
        format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        datefmt='%Y-%m-%d %H:%M',
        level=logging.DEBUG,
    )
    arg_parser = argparse.ArgumentParser(description=__doc__)
    arg_parser.add_argument('filename', help='CSV file with header "artf, x, y, z"')
    options = arg_parser.parse_args()
    try:
        httpd = HTTPServer(('', 8888), MyHandler)
        httpd.game_logic = GameLogic(options.filename)
        print('started httpserver...')
        httpd.serve_forever()
    except KeyboardInterrupt:
        print('keyboard interrupt')
        httpd.socket.close()
# Script entry point.
if __name__ == '__main__':
    main()
# vim: expandtab sw=4 ts=4
| 32.742574 | 278 | 0.599335 | 2,195 | 0.663744 | 0 | 0 | 0 | 0 | 0 | 0 | 855 | 0.258542 |
5b8eda9c95d489f300e7c9259bd30883f72e24c7 | 637 | py | Python | app/models/item.py | mtawil/item-catalog | 1a46ab7a915bf4092a3cbee840937a6d8a9d0673 | [
"MIT"
] | 1 | 2022-01-28T18:57:39.000Z | 2022-01-28T18:57:39.000Z | app/models/item.py | mtawil/item-catalog | 1a46ab7a915bf4092a3cbee840937a6d8a9d0673 | [
"MIT"
] | null | null | null | app/models/item.py | mtawil/item-catalog | 1a46ab7a915bf4092a3cbee840937a6d8a9d0673 | [
"MIT"
] | null | null | null | from orator import mutator
from slugify import slugify
from app import db
from orator.orm import belongs_to
class Item(db.Model):
    """Orator ORM model backed by the `items` table."""
    __table__ = 'items'
    # Mass-assignable columns.
    __fillable__ = ['title', 'description']
    # Columns excluded from serialized output.
    __hidden__ = ['slug', 'category_id', 'user_id']
    __timestamps__ = False
    @mutator
    def title(self, value):
        # Writing the title also refreshes the URL slug derived from it.
        self.set_raw_attribute('title', value)
        self.set_raw_attribute('slug', slugify(value))
    @belongs_to
    def category(self):
        # Imported locally — presumably to avoid a circular import with
        # app.models; confirm before hoisting to module level.
        from app.models.category import Category
        return Category
    @belongs_to
    def user(self):
        # Local import, same reasoning as `category` above.
        from app.models.user import User
        return User
| 21.965517 | 54 | 0.66562 | 526 | 0.825746 | 0 | 0 | 340 | 0.533752 | 0 | 0 | 68 | 0.10675 |
5b90d091cb2251945499a059e54ea7667ca3efce | 2,004 | py | Python | tests/test_retry_middleware.py | 0xfede7c8/scrapy-fake-useragent | 3e5ed91e0fb15789bea13909ec9f17a6fa4c17e8 | [
"BSD-3-Clause"
] | null | null | null | tests/test_retry_middleware.py | 0xfede7c8/scrapy-fake-useragent | 3e5ed91e0fb15789bea13909ec9f17a6fa4c17e8 | [
"BSD-3-Clause"
] | null | null | null | tests/test_retry_middleware.py | 0xfede7c8/scrapy-fake-useragent | 3e5ed91e0fb15789bea13909ec9f17a6fa4c17e8 | [
"BSD-3-Clause"
] | null | null | null | import pytest
from scrapy import Request
from scrapy.http import Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
from twisted.internet.error import DNSLookupError
from scrapy_fake_useragent.middleware import RetryUserAgentMiddleware
@pytest.fixture
def retry_middleware_response(request):
    """
    Fixture to simplify creating a crawler
    with an activated middleware and going through
    the request-response cycle.
    Executes process_response() method of the middleware.

    Parametrized indirectly with a ``(settings_dict, response_status)``
    tuple; yields whatever process_response() returns for that status.
    """
    settings, status = request.param
    crawler = get_crawler(Spider, settings_dict=settings)
    spider = crawler._create_spider('foo')
    mw = RetryUserAgentMiddleware.from_crawler(crawler)
    req = Request('http://www.scrapytest.org/')
    rsp = Response(req.url, body=b'', status=status)
    yield mw.process_response(req, rsp, spider)
@pytest.fixture
def retry_middleware_exception(request):
    """
    Fixture to simplify creating a crawler
    with an activated retry middleware and going through
    the request-response cycle.
    Executes process_exception() method of the middleware.

    Parametrized indirectly with a ``(settings_dict, exception)`` tuple;
    yields whatever process_exception() returns for that exception.
    """
    settings, exception = request.param
    crawler = get_crawler(Spider, settings_dict=settings)
    spider = crawler._create_spider('foo')
    mw = RetryUserAgentMiddleware.from_crawler(crawler)
    req = Request('http://www.scrapytest.org/')
    yield mw.process_exception(req, exception, spider)
# A 503 response triggers the retry path, which must attach a fallback
# User-Agent header to the retried request.
@pytest.mark.parametrize(
    'retry_middleware_response',
    (({'FAKEUSERAGENT_FALLBACK': 'firefox'}, 503), ),
    indirect=True
)
def test_random_ua_set_on_response(retry_middleware_response):
    assert 'User-Agent' in retry_middleware_response.headers
# A network-level exception (DNS lookup failure) also triggers a retry,
# which must attach a fallback User-Agent header to the retried request.
@pytest.mark.parametrize(
    'retry_middleware_exception',
    (({'FAKEUSERAGENT_FALLBACK': 'firefox'},
      DNSLookupError('Test exception')), ),
    indirect=True
)
def test_random_ua_set_on_exception(retry_middleware_exception):
    assert 'User-Agent' in retry_middleware_exception.headers
| 28.628571 | 69 | 0.753493 | 0 | 0 | 1,137 | 0.567365 | 1,720 | 0.858283 | 0 | 0 | 629 | 0.313872 |
5b9151562ac70dbe8d93818415cfa9bf040487d1 | 1,186 | py | Python | tests/gateway/test_virtonomica.py | xlam/autovirt | a19f9237c8b1123ce4f4b8b396dc88122019d4f8 | [
"MIT"
] | null | null | null | tests/gateway/test_virtonomica.py | xlam/autovirt | a19f9237c8b1123ce4f4b8b396dc88122019d4f8 | [
"MIT"
] | null | null | null | tests/gateway/test_virtonomica.py | xlam/autovirt | a19f9237c8b1123ce4f4b8b396dc88122019d4f8 | [
"MIT"
] | null | null | null | from autovirt.gateway.virtonomica.shopgateway import ShopGateway
def data(shop_id: int) -> list[dict]:
    """Return three canned shopboard rows for the given shop id.

    Every row carries the same shop id and goods category; only the volume
    figures vary.
    """
    volume_rows = [
        # (sales_volume, purchased_amount, quantity)
        (100, 100, 100),
        (100, 200, 300),
        (100, 1000, 10000),
    ]
    return [
        {
            "shop_unit_id": shop_id,
            "sales_volume": sales,
            "purchased_amount": purchased,
            "quantity": quantity,
            "goods_category_id": 0,
        }
        for sales, purchased, quantity in volume_rows
    ]
class ShopApiMock:
    """Test double for the shop API: serves canned rows from `data()`."""
    def __init__(self):
        # Remembers the most recently requested shop id.
        self._shop_id = None
    def fetch_shopboard(self, shop_id: int) -> list[dict]:
        self._shop_id = shop_id
        return data(shop_id)
def test_shop_gateway():
    """ShopGateway should wrap the raw API rows into a ShopBoard."""
    shop_id = 1
    gw = ShopGateway(ShopApiMock())
    res = gw.get_shopboard(shop_id)
    # Imported here so the module can be collected even if the domain
    # package is only needed at test-run time.
    from autovirt.domain.shopboard import ShopBoard
    assert isinstance(res, ShopBoard)
    # One board entry per raw row, each carrying the requested shop id.
    assert len(res) == len(data(shop_id))
    assert res[0].unit_id == shop_id
| 24.708333 | 64 | 0.554806 | 192 | 0.161889 | 0 | 0 | 0 | 0 | 0 | 0 | 225 | 0.189713 |
5b9151ea0896990566bf4da390f7e5b70a755ccf | 1,579 | py | Python | tests/python/physics_cloth.py | gunslingster/CSC581-assignement1 | 39012146e142bf400c7140d90ecfd27c45b589ca | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 39 | 2020-05-26T15:21:14.000Z | 2022-03-24T04:46:31.000Z | tests/python/physics_cloth.py | gunslingster/CSC581-assignement1 | 39012146e142bf400c7140d90ecfd27c45b589ca | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 7 | 2020-05-11T14:04:54.000Z | 2020-06-03T15:00:20.000Z | tests/python/physics_cloth.py | gunslingster/CSC581-assignement1 | 39012146e142bf400c7140d90ecfd27c45b589ca | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 5 | 2020-08-03T13:03:29.000Z | 2021-08-07T22:10:26.000Z | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import sys
import bpy
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from modules.mesh_test import ModifierTest, PhysicsSpec
def main():
    """Run the cloth-physics regression test(s) selected on the command line."""
    # One test entry: [test object name, expected object name, physics specs].
    test = [
        ["testCloth", "expectedCloth",
         [PhysicsSpec('Cloth', 'CLOTH', {'quality': 5}, 35)]],
    ]
    cloth_test = ModifierTest(test, threshold=1e-3)
    command = list(sys.argv)
    # Blender passes its own arguments too; scan for the first flag we know.
    for i, cmd in enumerate(command):
        if cmd == "--run-all-tests":
            cloth_test.apply_modifiers = True
            cloth_test.run_all_tests()
            break
        elif cmd == "--run-test":
            cloth_test.apply_modifiers = False
            # The flag is followed by the numeric index of the test to run.
            index = int(command[i + 1])
            cloth_test.run_test(index)
            break
# Entry point when executed inside Blender.
if __name__ == "__main__":
    main()
| 30.365385 | 74 | 0.662445 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 883 | 0.559215 |
5b915c3b17eeed0173d5f856392260bac3c66028 | 5,054 | py | Python | nowcasting_dataset/manager/base.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | null | null | null | nowcasting_dataset/manager/base.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | null | null | null | nowcasting_dataset/manager/base.py | JanEbbing/nowcasting_dataset | f907054a457987e6f6dbb13bfb65fc5359c6f680 | [
"MIT"
] | null | null | null | """Base Manager class."""
import logging
from pathlib import Path
from typing import Optional
import nowcasting_dataset.utils as nd_utils
from nowcasting_dataset import config
from nowcasting_dataset.data_sources import ALL_DATA_SOURCE_NAMES, MAP_DATA_SOURCE_NAME_TO_CLASS
logger = logging.getLogger(__name__)
class ManagerBase:
    """The Manager initializes and manage a dict of DataSource objects.

    Attrs:
      config: Configuration object.
      data_sources: dict[str, DataSource]
      data_source_which_defines_geospatial_locations: DataSource: The DataSource used to compute the
        geospatial locations of each example.
      save_batches_locally_and_upload: bool: Set to True by `load_yaml_configuration()` if
        `config.process.upload_every_n_batches > 0`.
    """

    def __init__(self) -> None:  # noqa: D107
        self.config = None
        self.data_sources = {}
        self.data_source_which_defines_geospatial_locations = None

    def load_yaml_configuration(self, filename: str, set_git: bool = True) -> None:
        """Load YAML config from `filename`."""
        # Lazy %-style logging, and actually include the filename (the old
        # message was an f-string with no placeholder).
        logger.debug("Loading YAML configuration file %s", filename)
        self.config = config.load_yaml_configuration(filename)
        if set_git:
            self.config = config.set_git_commit(self.config)
        self.save_batches_locally_and_upload = self.config.process.upload_every_n_batches > 0
        logger.debug("config=%s", self.config)

    def initialize_data_sources(
        self, names_of_selected_data_sources: Optional[list[str]] = ALL_DATA_SOURCE_NAMES
    ) -> None:
        """Initialize DataSources specified in the InputData configuration.

        For each key in each DataSource's configuration object, the string `<data_source_name>_`
        is removed from the key before passing to the DataSource constructor. This allows us to
        have verbose field names in the configuration YAML files, whilst also using standard
        constructor arguments for DataSources.
        """
        # The parameter is Optional: treat an explicit None as "all".
        if names_of_selected_data_sources is None:
            names_of_selected_data_sources = ALL_DATA_SOURCE_NAMES
        for data_source_name in names_of_selected_data_sources:
            logger.debug(f"Creating {data_source_name} DataSource object.")
            config_for_data_source = getattr(self.config.input_data, data_source_name)
            if config_for_data_source is None:
                logger.info(f"No configuration found for {data_source_name}.")
                continue
            config_for_data_source = config_for_data_source.dict()
            config_for_data_source.pop("log_level")
            # save config to data source logger
            data_source_logger = logging.getLogger(
                f"nowcasting_dataset.data_sources.{data_source_name}"
            )
            data_source_logger.debug(
                f"The configuration for {data_source_name} is {config_for_data_source}"
            )
            # Strip `<data_source_name>_` from the config option field names.
            config_for_data_source = nd_utils.remove_regex_pattern_from_keys(
                config_for_data_source, pattern_to_remove=f"^{data_source_name}_"
            )
            data_source_class = MAP_DATA_SOURCE_NAME_TO_CLASS[data_source_name]
            try:
                data_source = data_source_class(**config_for_data_source)
            except Exception:
                logger.exception(f"Exception whilst instantiating {data_source_name}!")
                raise
            self.data_sources[data_source_name] = data_source
        # Set data_source_which_defines_geospatial_locations:
        defining_name = self.config.input_data.data_source_which_defines_geospatial_locations
        try:
            self.data_source_which_defines_geospatial_locations = self.data_sources[defining_name]
        except KeyError:
            if self._locations_csv_file_exists():
                logger.info(
                    f"{self.config.input_data.data_source_which_defines_geospatial_locations=}"
                    " is not a member of the DataSources, but that does not matter because the CSV"
                    " files which specify the locations of the examples already exists!"
                )
            else:
                msg = (
                    "input_data.data_source_which_defines_geospatial_locations="
                    f"{self.config.input_data.data_source_which_defines_geospatial_locations}"
                    " is not a member of the DataSources, so cannot set"
                    " self.data_source_which_defines_geospatial_locations!"
                    f" The available DataSources are: {list(self.data_sources.keys())}"
                )
                logger.error(msg)
                raise RuntimeError(msg)
        else:
            # Log the *configured* source, not the stale loop variable: the
            # old f-string reported the last source created in the loop and
            # raised NameError when the loop body never ran.
            logger.info(
                f"DataSource `{defining_name}` set as"
                " data_source_which_defines_geospatial_locations."
            )

    def _locations_csv_file_exists(self):
        # Subclasses override this; the base manager has no CSV files.
        return False

    def _filename_of_locations_csv_file(self, split_name: str) -> Path:
        return self.config.output_data.filepath / split_name
| 44.725664 | 100 | 0.666601 | 4,738 | 0.937475 | 0 | 0 | 0 | 0 | 0 | 0 | 2,097 | 0.414919 |
5b9693fa2b04beeaf69c92e83d17519a3a978143 | 764 | py | Python | GatewayServis/API/ImageAPI.py | CommName/WildeLifeWatcher | 3ce3b564d0e6cc81ebc2b607a712580d3c388db6 | [
"MIT"
] | null | null | null | GatewayServis/API/ImageAPI.py | CommName/WildeLifeWatcher | 3ce3b564d0e6cc81ebc2b607a712580d3c388db6 | [
"MIT"
] | null | null | null | GatewayServis/API/ImageAPI.py | CommName/WildeLifeWatcher | 3ce3b564d0e6cc81ebc2b607a712580d3c388db6 | [
"MIT"
] | null | null | null | import cherrypy
import requests
import json
from CommunicationLayer import ServiceRegistry
@cherrypy.popargs('imageName')
class ImageAPI(object):
    """CherryPy endpoint that proxies image requests to the "Data" services."""
    address = "http://127.0.0.1:8761/"
    @cherrypy.expose()
    def index(self, imageName):
        # Get the data-center services registered under the "Data" type.
        servicesArray = ServiceRegistry.getServices("Data")
        s = requests.Session()
        for service in servicesArray:
            # Ask each service in turn; the first 2xx answer wins.
            response = s.get(service["ServiceAddress"]+"/image/"+imageName,)
            if response.status_code >= 200 and response.status_code < 300:
                cherrypy.response.headers["Content-Type"] = 'image/jpeg'
                return response.content
        raise cherrypy.HTTPError(404, "Your image could not be found in any active service")
| 28.296296 | 92 | 0.662304 | 638 | 0.835079 | 0 | 0 | 669 | 0.875654 | 0 | 0 | 163 | 0.213351 |
5b96dfeca9efe5e44a9af8ca8317c32891758506 | 1,489 | py | Python | tests/seahub/invitations/test_views.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/seahub/invitations/test_views.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | tests/seahub/invitations/test_views.py | Xandersoft/seahub | f75f238b3e0a907e8a8003f419e367fa36e992e7 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | from django.utils import timezone
from django.core.urlresolvers import reverse
from seahub.invitations.models import Invitation
from seahub.test_utils import BaseTestCase
class TokenViewTest(BaseTestCase):
    """Tests for the invitation token view (GET renders form, POST accepts)."""
    def setUp(self):
        # Create a pending invitation from the logged-in user to `accepter`.
        self.accepter = 'random@foo.com'
        self.iv = Invitation.objects.add(inviter=self.user.username,
                                         accepter=self.accepter)
        self.url = reverse('invitations:token_view', args=[self.iv.token])
    def tearDown(self):
        # Remove the account that may have been created by accepting the invite.
        self.remove_user(self.accepter)
    def test_get(self):
        """GET on a valid token shows the password-setting page."""
        resp = self.client.get(self.url)
        self.assertEqual(200, resp.status_code)
        self.assertRegexpMatches(resp.content, 'Set your password')
    def test_expired_token(self):
        """An invitation whose expire_time has passed returns 404."""
        self.iv.expire_time = timezone.now()
        self.iv.save()
        resp = self.client.get(self.url)
        self.assertEqual(404, resp.status_code)
    def test_post(self):
        """POSTing a password accepts the invitation (sets accept_time)."""
        assert self.iv.accept_time is None
        resp = self.client.post(self.url, {
            'password': 'passwd'
        })
        self.assertEqual(302, resp.status_code)
        assert Invitation.objects.get(pk=self.iv.pk).accept_time is not None
    def test_post_empty_password(self):
        # NOTE(review): also redirects (302) but must NOT accept the invitation.
        assert self.iv.accept_time is None
        resp = self.client.post(self.url, {
            'password': '',
        })
        self.assertEqual(302, resp.status_code)
        assert Invitation.objects.get(pk=self.iv.pk).accept_time is None
5b983dc44d6c799abc8012a0d7ea31430d04c3b1 | 6,289 | py | Python | projects/seeker/tasks/search_decision.py | DrMatters/ParlAI | 755b9dcb778deb5a82029d69ae3260579c6450f1 | [
"MIT"
] | 1 | 2022-03-27T17:16:19.000Z | 2022-03-27T17:16:19.000Z | projects/seeker/tasks/search_decision.py | DrMatters/ParlAI | 755b9dcb778deb5a82029d69ae3260579c6450f1 | [
"MIT"
] | null | null | null | projects/seeker/tasks/search_decision.py | DrMatters/ParlAI | 755b9dcb778deb5a82029d69ae3260579c6450f1 | [
"MIT"
] | 1 | 2022-03-30T14:05:29.000Z | 2022-03-30T14:05:29.000Z | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
SeeKeR Search Decision Tasks.
"""
from typing import Optional
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.teachers import MultiTaskTeacher
import parlai.tasks.convai2.agents as convai2
import parlai.tasks.empathetic_dialogues.agents as ed
import parlai.tasks.wizard_of_internet.agents as woi
import parlai.tasks.wizard_of_wikipedia.agents as wow
import parlai.tasks.squad.agents as squad
import parlai.tasks.triviaqa.agents as triviaqa
import parlai.tasks.natural_questions.agents as nq
import parlai.tasks.msc.agents as msc
import parlai.utils.logging as logging
import projects.seeker.tasks.mutators # type: ignore
class WoiSearchDecisionTeacher(woi.DefaultTeacher):
    """Wizard of the Internet teacher with search-decision mutators applied."""

    def __init__(self, opt, shared=None):
        # Task-specific mutator pipeline, applied before any user-specified ones.
        base_mutators = [
            'flatten',
            'woi_dropout_retrieved_docs',
            'woi_maybe_generate_search_query_mutator',
            'skip_retrieval_mutator',
        ]
        spec = '+'.join(base_mutators)
        if opt.get('mutators'):
            spec = f"{spec}+{opt['mutators']}"
        logging.warning(f'overriding mutators to {spec}')
        opt['mutators'] = spec
        super().__init__(opt, shared)
        self.id = 'WoiSearchDecisionTeacher'
class WowSearchDecisionTeacher(wow.DefaultTeacher):
    """Wizard of Wikipedia teacher with search-decision mutators applied."""

    def __init__(self, opt, shared=None):
        # Task-specific mutator pipeline, applied before any user-specified ones.
        base_mutators = [
            'flatten',
            'wow_maybe_generate_search_query_mutator',
            'skip_retrieval_mutator',
        ]
        spec = '+'.join(base_mutators)
        if opt.get('mutators'):
            spec = f"{spec}+{opt['mutators']}"
        logging.warning(f'overriding mutators to {spec}')
        opt['mutators'] = spec
        # Include all turns of each dialogue, not just the wizard turns.
        opt['add_missing_turns'] = 'all'
        super().__init__(opt, shared)
        self.id = 'WowSearchDecisionTeacher'
class SquadSearchDecisionTeacher(squad.OpensquadTeacher):
    """Open SQuAD teacher with search-decision mutators applied."""

    def __init__(self, opt, shared=None):
        spec = '+'.join(['do_generate_search_query_mutator', 'skip_retrieval_mutator'])
        if opt.get('mutators'):
            spec = f"{spec}+{opt['mutators']}"
        logging.warning(f'overriding mutators to {spec}')
        opt['mutators'] = spec
        super().__init__(opt, shared)
        self.id = 'SquadSearchDecisionTeacher'
class TriviaQASearchDecisionTeacher(triviaqa.NoEvidenceWebTeacher):
    """TriviaQA (no-evidence web split) teacher with search-decision mutators."""

    def __init__(self, opt, shared=None):
        spec = '+'.join(['do_generate_search_query_mutator', 'skip_retrieval_mutator'])
        if opt.get('mutators'):
            spec = f"{spec}+{opt['mutators']}"
        logging.warning(f'overriding mutators to {spec}')
        opt['mutators'] = spec
        super().__init__(opt, shared)
        self.id = 'TriviaQASearchDecisionTeacher'
class NQSearchDecisionTeacher(nq.NaturalQuestionsOpenTeacher):
    """Natural Questions (open) teacher with search-decision mutators."""

    def __init__(self, opt, shared=None):
        spec = '+'.join(['do_generate_search_query_mutator', 'skip_retrieval_mutator'])
        if opt.get('mutators'):
            spec = f"{spec}+{opt['mutators']}"
        logging.warning(f'overriding mutators to {spec}')
        opt['mutators'] = spec
        super().__init__(opt, shared)
        self.id = 'NQSearchDecisionTeacher'
def get_dialogue_task_mutators(opt: Opt) -> str:
    """
    Set the mutators appropriately for the dialogue tasks.

    Combines the shared dialogue-task mutator pipeline with any mutators the
    user specified in ``opt`` and returns the joined ``+``-separated spec.
    """
    base_mutators = [
        'flatten',
        'skip_retrieval_mutator',
        'bst_tasks_maybe_generate_search_query_mutator',
    ]
    spec = '+'.join(base_mutators)
    if opt.get('mutators'):
        spec = f"{spec}+{opt['mutators']}"
        logging.warning(f'overriding mutators to {spec}')
    return spec
class Convai2SearchDecisionTeacher(convai2.NormalizedTeacher):
    """ConvAI2 dialogue teacher with the shared search-decision mutators."""
    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        # Use the no-candidates task variant.
        opt['task'] += ':no_cands'
        super().__init__(opt, shared)
        self.id = 'Convai2SearchDecisionTeacher'
class EDSearchDecisionTeacher(ed.DefaultTeacher):
    """EmpatheticDialogues teacher with the shared search-decision mutators."""
    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        super().__init__(opt, shared)
        self.id = 'EDSearchDecisionTeacher'
class MSCSearchDecisionTeacher(msc.DefaultTeacher):
    """Multi-Session Chat teacher with the shared search-decision mutators."""
    def __init__(self, opt, shared=None):
        opt['mutators'] = get_dialogue_task_mutators(opt)
        # Session 1 is ConvAI2 data -- presumably excluded here to avoid
        # overlap with Convai2SearchDecisionTeacher; confirm.
        opt['include_session1'] = False
        super().__init__(opt, shared)
        self.id = 'MSCSearchDecisionTeacher'
class SearchDecisionTeacher(MultiTaskTeacher):
    """Multi-task teacher aggregating every search-decision subtask."""

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        # Register the command-line options of every subteacher.
        for teacher_cls in (
            WoiSearchDecisionTeacher,
            WowSearchDecisionTeacher,
            SquadSearchDecisionTeacher,
            TriviaQASearchDecisionTeacher,
            NQSearchDecisionTeacher,
            Convai2SearchDecisionTeacher,
            EDSearchDecisionTeacher,
            MSCSearchDecisionTeacher,
        ):
            teacher_cls.add_cmdline_args(parser, partial_opt)
        return parser

    def __init__(self, opt, shared=None):
        teacher_names = [
            'WoiSearchDecisionTeacher',
            'WowSearchDecisionTeacher',
            'SquadSearchDecisionTeacher',
            'TriviaQASearchDecisionTeacher',
            'NQSearchDecisionTeacher',
            'Convai2SearchDecisionTeacher',
            'EDSearchDecisionTeacher',
            'MSCSearchDecisionTeacher',
        ]
        # Build the comma-separated multi-task spec of fully-qualified teachers.
        opt['task'] = ','.join(
            f"projects.seeker.tasks.search_decision:{name}" for name in teacher_names
        )
        super().__init__(opt, shared)
class DefaultTeacher(SearchDecisionTeacher):
    """Default teacher for this task: the multi-task search-decision teacher."""
    pass
| 35.331461 | 75 | 0.657497 | 4,924 | 0.782954 | 0 | 0 | 729 | 0.115917 | 0 | 0 | 1,749 | 0.278105 |
5b994b983a9b32668d74419e6514f5b302e70c33 | 445 | py | Python | django101/django101/urls.py | nrgxtra/web_basics | 073ccb361af666c9fe2b3fa0b5cf74d721acb1b4 | [
"MIT"
] | null | null | null | django101/django101/urls.py | nrgxtra/web_basics | 073ccb361af666c9fe2b3fa0b5cf74d721acb1b4 | [
"MIT"
] | null | null | null | django101/django101/urls.py | nrgxtra/web_basics | 073ccb361af666c9fe2b3fa0b5cf74d721acb1b4 | [
"MIT"
] | null | null | null |
from django.contrib import admin
from django.urls import path, include
from django101 import cities
from django101.cities.views import index, list_phones, test_index, create_person
# URL routing table for the django101 project.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('test/', test_index),
    # Named route for the person-creation view.
    path('create/', create_person, name='create person'),
    # Delegated per-app URL configurations.
    path('cities/', include('django101.cities.urls')),
    path('', include('django101.people.urls')),
]
| 29.666667 | 81 | 0.694382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 96 | 0.21573 |
5b9a1344434efcaa1dceca50f39a0f10202d1fd1 | 4,150 | py | Python | helpers.py | gyhor/redmein | 2dfe8b32ae4a899c1c3df7e430fb7b93931a2885 | [
"MIT"
] | null | null | null | helpers.py | gyhor/redmein | 2dfe8b32ae4a899c1c3df7e430fb7b93931a2885 | [
"MIT"
] | null | null | null | helpers.py | gyhor/redmein | 2dfe8b32ae4a899c1c3df7e430fb7b93931a2885 | [
"MIT"
] | 1 | 2021-01-21T19:19:46.000Z | 2021-01-21T19:19:46.000Z | from __future__ import print_function
from datetime import date, datetime, timedelta
import os
import tempfile
# Reporting periods keyed by their CLI abbreviation; each maps to the
# canonical period name (used by resolve_period) and a human description.
PERIODS = {
    'y': {'name': 'yesterday', 'description': 'Yesterday'},
    'lw': {'name': 'lastweek', 'description': 'Last work week'},
    'cw': {'name': 'currentweek', 'description': 'Current work week'},
    'flw': {'name': 'fulllastweek', 'description': 'Last full week'},
    'fcw': {'name': 'fullcurrentweek', 'description': 'Current full week'}
}
def time_entry_list(from_date, to_date, user, redmine):
    """Print a report of the user's Redmine time entries between two dates."""
    print("Fetching time entries from {} to {} for {}...".format(from_date, to_date, user))
    print()
    entries = redmine.time_entry.filter(
        user_id=user.id, from_date=from_date, to_date=to_date, sort='hours:desc')
    if not entries:
        report = "No time entries.\n"
    else:
        total_hours = 0
        pieces = ["Time entries:\n"]
        for entry in entries:
            pieces.append(entry_bullet_point(entry))
            total_hours += entry.hours
        pieces.append("\n" + str(total_hours) + " hours.\n")
        report = "".join(pieces)
    print(report)
def entry_bullet_point(entry):
    """Format one time entry as a single markdown bullet line.

    Shows the entry comment, hours, linked issue id ('none' when the entry
    has no issue), the entry id and the activity name.
    """
    issue_ref = '#' + str(entry.issue.id) if hasattr(entry, 'issue') else 'none'
    pieces = [
        '* {} / {} hours ({})'.format(entry.comments, str(entry.hours), issue_ref),
        ' [' + str(entry.id) + ']',
        ' ' + str(entry.activity),
    ]
    return ''.join(pieces) + "\n"
def handle_date_calculation_value(date_value):
    """Resolve a relative day offset such as ``'+3'`` or ``'-1'`` to ``YYYY-MM-DD``.

    Values that do not start with ``+`` or ``-`` are returned unchanged.
    """
    if date_value[:1] in ('+', '-'):
        resolved = date.today() + timedelta(int(date_value))
        return resolved.strftime('%Y-%m-%d')
    return date_value
def weekday_of_week(day_of_week, weeks_previous=0):
    """Return the ``YYYY-MM-DD`` date of *day_of_week* (0=Monday) in the week
    *weeks_previous* weeks before the current one."""
    offset_days = date.today().weekday() + 7 * weeks_previous - day_of_week
    target = datetime.now() - timedelta(days=offset_days)
    return target.strftime("%Y-%m-%d")
def weekday_last_week(day_of_week):
    """Return the date of *day_of_week* (0=Monday) in the previous week."""
    return weekday_of_week(day_of_week, 1)
def resolve_period_abbreviation(period):
    """Map a period abbreviation (e.g. ``'lw'``) to its canonical name.

    Full canonical names are accepted and returned as-is; unknown values
    yield ``None``. Matching is case-insensitive.
    """
    period = period.lower()
    if period in PERIODS:
        return PERIODS[period]['name']
    canonical_names = {item.get('name') for item in PERIODS.values()}
    if period in canonical_names:
        return period
    return None
def resolve_period(period):
    """Return ``{'start': ..., 'end': ...}`` date strings for a canonical period name.

    Unknown period names yield ``None`` (implicit return).
    """
    if period == 'yesterday':
        yesterday = handle_date_calculation_value('-1')
        return {'start': yesterday, 'end': yesterday}
    # (day_of_week, weeks_previous) pairs for each span's start and end.
    week_spans = {
        'lastweek': ((0, 1), (4, 1)),         # last Monday .. last Friday
        'currentweek': ((0, 0), (4, 0)),      # this Monday .. this Friday
        'fulllastweek': ((6, 2), (5, 1)),     # Sunday before last .. last Saturday
        'fullcurrentweek': ((6, 1), (5, 0)),  # last Sunday .. this Saturday
    }
    if period in week_spans:
        (start_day, start_weeks), (end_day, end_weeks) = week_spans[period]
        return {
            'start': weekday_of_week(start_day, start_weeks),
            'end': weekday_of_week(end_day, end_weeks),
        }
def resolve_activity_alias(activity_name, aliases):
    """Follow alias chains in *aliases* until a non-aliased activity name is found.

    Args:
        activity_name: activity name or alias to resolve.
        aliases: mapping of alias -> target (targets may themselves be aliases).

    Returns:
        The fully resolved activity name.

    Raises:
        ValueError: if the alias mapping contains a cycle (the original
            implementation recursed forever / hit RecursionError).
    """
    seen = set()
    while activity_name in aliases:
        if activity_name in seen:
            raise ValueError("alias cycle detected at %r" % activity_name)
        seen.add(activity_name)
        activity_name = aliases[activity_name]
    return activity_name
def resolve_issue_template(issue_name, templates):
    """Return the template mapping for *issue_name*, or ``None`` if unknown."""
    return templates.get(issue_name)
def template_field(issue_name, field, templates):
    """Return *field* from the template of *issue_name*, or ``None`` when the
    template or the field is absent."""
    template = resolve_issue_template(issue_name, templates)
    return template[field] if template and field in template else None
def resolve_issue_alias(issue_id, templates):
    """Follow template ``id`` references until reaching a concrete issue id."""
    next_id = template_field(issue_id, 'id', templates)
    return resolve_issue_alias(next_id, templates) if next_id else issue_id
def get_cache_filename(type_name):
    """Return the path of the per-type redmein cache file in the temp directory."""
    cache_name = 'redmein-{}'.format(type_name)
    return os.path.join(tempfile.gettempdir(), cache_name)
| 29.642857 | 118 | 0.659759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 775 | 0.186747 |
5b9a8dd256cc2cefb07a2f6abdab4d4f9808a5e9 | 1,695 | py | Python | GeneratebestpolTextMaxAverageReturn.py | TakuyaHiraoka/Learning-Robust-Options-by-Conditional-Value-at-Risk-Optimization | 78d3f8e36cdc954897ddc6af9029991bff19fb58 | [
"MIT"
] | 9 | 2019-12-11T20:34:20.000Z | 2021-05-23T04:35:29.000Z | GeneratebestpolTextMaxAverageReturn.py | TakuyaHiraoka/Learning-Robust-Options-by-Conditional-Value-at-Risk-Optimization | 78d3f8e36cdc954897ddc6af9029991bff19fb58 | [
"MIT"
] | null | null | null | GeneratebestpolTextMaxAverageReturn.py | TakuyaHiraoka/Learning-Robust-Options-by-Conditional-Value-at-Risk-Optimization | 78d3f8e36cdc954897ddc6af9029991bff19fb58 | [
"MIT"
] | null | null | null | import os
import re
import statistics
def find_all_key_files_path(directory, keyfile_name):
    """Recursively collect paths of files whose names contain *keyfile_name*.

    Note: *keyfile_name* is embedded in a regex unescaped, so regex
    metacharacters in it act as pattern syntax (e.g. the ``.`` in ``'.csv'``
    matches any character).
    """
    pattern = re.compile(".*" + keyfile_name + ".*")
    matches = []
    for root, dirs, files in os.walk(directory):
        matches.extend(
            os.path.join(root, name) for name in files if pattern.match(name)
        )
    return matches
if __name__ == '__main__':
    # experiment setup params.
    root_path = "./"
    epoch = 0  # NOTE(review): unused -- confirm before removing
    # find result files by recursively opening directories.
    result_files = find_all_key_files_path(root_path, ".csv")
    print(result_files)
    print()
    # open files
    results = []  # NOTE(review): unused -- confirm before removing
    for result_file in result_files:
        # find the best one
        f_learning_log = open(result_file, "r")
        i = 0
        bestpol_id = -1
        bestpol_score = -99999.0
        scores=[]
        # Lines are processed in groups of 5 (the first line, i == 0, is
        # skipped as a header). Every 5th line closes a group: its score is
        # appended, its first CSV field is read as the policy id, and the
        # mean of the accumulated scores decides whether this policy becomes
        # the best so far.
        for line in f_learning_log.readlines():
            if (i % 5) == 0 and (i!=0):
                scores.append(float(line.split(",")[-1]))
                # NOTE(review): `id` shadows the builtin of the same name.
                id = int(line.split(",")[0])
                print(scores)
                print(id)
                if bestpol_score < statistics.mean(scores):
                    bestpol_id = id
                    bestpol_score = statistics.mean(scores)
                scores = []
            elif i != 0:
                scores.append(float(line.split(",")[-1]))
            i += 1
        print(bestpol_id)
        print(bestpol_score)
        #
        # Write the winning policy id next to the run's saves directory.
        # Assumes result file paths contain "_results"; TODO confirm.
        dirname = result_file.split("_results")[0] + "saves/"
        print(dirname)
        f_saves = open(dirname+"bestpol-cvar.txt", "w")
        f_saves.write(str(bestpol_id))
        f_saves.close()
        f_learning_log.close()
5b9b1eea85ebeac6ed18057f8f6bb2b43f18b42d | 2,573 | py | Python | examples/2_commit-simple.py | sgibson91/github_api_test | d294c4c792a6af929fb789d4004c87260a3cee7b | [
"MIT"
] | 1 | 2021-09-22T08:04:44.000Z | 2021-09-22T08:04:44.000Z | examples/2_commit-simple.py | sgibson91/github-commits-over-api | d294c4c792a6af929fb789d4004c87260a3cee7b | [
"MIT"
] | null | null | null | examples/2_commit-simple.py | sgibson91/github-commits-over-api | d294c4c792a6af929fb789d4004c87260a3cee7b | [
"MIT"
] | 9 | 2021-09-24T11:17:54.000Z | 2021-09-24T11:32:03.000Z | import argparse
import base64
import os
import subprocess
import sys
import tempfile
import requests
def parse_args(args):
    """Parse the command-line arguments.

    Args:
        args: argv-style list of arguments (e.g. ``sys.argv[1:]``).

    Returns:
        argparse.Namespace with ``repository`` (``USER/REPO``) and
        ``filepath`` (path relative to the repository root).
    """
    parser = argparse.ArgumentParser(
        description="""
        A simplified script to make a commit to the default branch of a repository over
        the GitHub API
        """
    )
    parser.add_argument(
        "repository", type=str, help="The repository to commit to in the form USER/REPO"
    )
    parser.add_argument(
        "filepath",
        type=str,
        help="The path to the file to be edited, relative to the repository root",
    )
    # Fix: the original called parse_args() with no arguments, silently
    # ignoring the `args` parameter and reading sys.argv instead.
    return parser.parse_args(args)
def main():
    """Interactively edit one repository file and commit it via the GitHub API.

    Reads GITHUB_TOKEN from the environment, downloads the file, opens it in
    ``nano`` for editing, then PUTs the edited contents back.

    Raises:
        ValueError: when GITHUB_TOKEN is not set.
    """
    # Parse arguments from the command line
    args = parse_args(sys.argv[1:])
    # Verify GITHUB_TOKEN has been set in the environment
    token = os.environ["GITHUB_TOKEN"] if "GITHUB_TOKEN" in os.environ else None
    if token is None:
        raise ValueError("GITHUB_TOKEN must be set in the environment!")
    # Set API URL
    API_ROOT = "https://api.github.com"
    repo_api = "/".join([API_ROOT, "repos", args.repository])
    # Create a requests header
    HEADER = {
        "Accept": "application/vnd.github.v3+json",
        "Authorization": f"token {token}",
    }
    # === Begin making the changes and commit === #
    # Step 1. Get relevant URLs
    url = "/".join([repo_api, "contents", args.filepath])
    resp = requests.get(url, headers=HEADER)
    resp = resp.json()
    # The blob SHA is required by the contents API to update an existing file.
    blob_sha = resp["sha"]
    # Step 2. Updating the file
    # 2a. Getting the file contents
    resp = requests.get(resp["download_url"], headers=HEADER)
    # 2b. Dump the contents to a temporary file
    tmpf = tempfile.NamedTemporaryFile()
    with open(tmpf.name, "w") as f:
        f.write(resp.text)
    # 2c. Edit the file contents
    # NOTE(review): hardcodes the `nano` editor; consider honoring $EDITOR.
    subprocess.run(["nano", tmpf.name])
    # 2d. Read in the edited file
    with open(tmpf.name, "r") as f:
        file_contents = f.read()
    # 2e. Clean up the temporary file
    tmpf.close()
    # Step 4. Encode the file contents
    # NOTE(review): step numbering skips 3; ascii encoding fails on non-ASCII edits.
    encoded_file_contents = file_contents.encode("ascii")
    base64_bytes = base64.b64encode(encoded_file_contents)
    raw_contents = base64_bytes.decode("utf-8")
    # Step 5. Replace the file in the repository
    print("Please provide a commit message:")
    msg = input("> ")
    url = "/".join([repo_api, "contents", args.filepath])
    body = {
        "message": msg,
        "content": raw_contents,
        "sha": blob_sha,
    }
    requests.put(url, json=body, headers=HEADER)
    # That's it! HEAD is automatically updated!
if __name__ == "__main__":
main()
| 26.255102 | 88 | 0.644773 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,081 | 0.420132 |
5b9b2b0153f61f01eed085b07622c7e582fd44d9 | 1,013 | py | Python | src/parruc/flexslider/interfaces.py | parruc/parruc.flexslider | b0074ba060db4a9813dca5c0da29d03ed386ce95 | [
"MIT"
] | null | null | null | src/parruc/flexslider/interfaces.py | parruc/parruc.flexslider | b0074ba060db4a9813dca5c0da29d03ed386ce95 | [
"MIT"
] | null | null | null | src/parruc/flexslider/interfaces.py | parruc/parruc.flexslider | b0074ba060db4a9813dca5c0da29d03ed386ce95 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Module where all interfaces, events and exceptions live."""
from . import _
from plone.app.vocabularies.catalog import CatalogSource
from plone.namedfile.field import NamedBlobImage
from plone.supermodel import model
from z3c.relationfield.schema import RelationChoice
from zope import schema
from zope.publisher.interfaces.browser import IDefaultBrowserLayer
# Catalog source restricting linkable slide targets to pages and news items.
launches = CatalogSource(portal_type=("Document", "News Item"))
# Registered when the parruc.flexslider add-on is installed.
class IParrucFlexsliderLayer(IDefaultBrowserLayer):
    """Marker interface that defines a browser layer."""
class ISlide(model.Schema):
    """Schema for one slider slide: image, color mode and optional link target.

    Field titles/descriptions are Italian i18n message strings shown to users.
    """
    # The slide image (recommended size 1200x300 per the description).
    image = NamedBlobImage(
        title=_("Immagine slide"),
        description=_(u"Dimensione consigliata 1200x300"),
        required=True,
    )
    # Whether to convert the image to black and white.
    bw = schema.Bool(
        title=_(u"Convertire in bianco e nero?"),
        default=True,
    )
    # Optional content to link from the slide, limited to the `launches` source.
    link = RelationChoice(
        title=_(u"Contenuto dal linkare nella slide"),
        source=launches,
        required=False,
    )
    # Mark the image as the primary field of the schema.
    model.primary('image')
| 25.974359 | 66 | 0.706811 | 553 | 0.545903 | 0 | 0 | 0 | 0 | 0 | 0 | 282 | 0.278381 |
5b9c535490c51d3506b243f651f0c9960ebce1df | 1,694 | py | Python | eth_data_collector/block.py | Whitecoin-XWC/Whitecoin-CrosschainMidware | 7560d80f3bac09250ed365e7e6b9239209e7e502 | [
"Apache-2.0"
] | null | null | null | eth_data_collector/block.py | Whitecoin-XWC/Whitecoin-CrosschainMidware | 7560d80f3bac09250ed365e7e6b9239209e7e502 | [
"Apache-2.0"
] | null | null | null | eth_data_collector/block.py | Whitecoin-XWC/Whitecoin-CrosschainMidware | 7560d80f3bac09250ed365e7e6b9239209e7e502 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
__author__ = 'hasee'
import json
from datetime import datetime
class BlockInfo(object):
    """Summary of an Ethereum block as stored by the collector."""

    def __init__(self):
        # Block hash.
        self.block_id = ''
        # Block height.
        self.block_num = 0
        # Block size in bytes.
        self.block_size = 0
        # Hash of the previous block.
        self.previous = ''
        # Digest (transactions root) of the transactions in the block.
        self.trx_digest = ''
        # Block producer / miner.
        self.miner = ''
        # Block time, formatted "%Y-%m-%d %H:%M:%S".
        self.block_time = ''
        # Transactions contained in the block.
        self.transactions = []
        # Total number of transactions in the block.
        self.trx_count = 0
        # Block reward.
        self.block_bonus = 0
        # Total transferred amount of the block's transactions.
        self.trx_amount = 0
        # Total transaction fees.
        self.trx_fee = 0

    def from_block_resp(self, block_result):
        """Populate this object from an eth_getBlockByNumber JSON-RPC result.

        Numeric fields arrive as 0x-prefixed hex strings and are decoded here.
        """
        self.block_id = (block_result.get("hash"))
        self.block_num = int(block_result.get("number"), 16)
        self.block_size = int(block_result.get("size"), 16)
        self.previous = (block_result.get("parentHash"))
        self.trx_digest = (block_result.get("transactionsRoot"))
        self.block_time = datetime.fromtimestamp(int(block_result.get("timestamp"), 16)).strftime("%Y-%m-%d %H:%M:%S")
        self.transactions = block_result.get("transactions")
        # NOTE(review): fixed block reward -- presumably pre-Byzantium; confirm.
        self.block_bonus = 5.0
        self.trx_count = len(self.transactions)
        # Fix: the original assigned self.amount / self.fee, which are not the
        # trx_amount / trx_fee fields reported by get_json_data().
        self.trx_amount = 0.0
        self.trx_fee = 0.0

    def get_json_data(self):
        """Return the block summary as a dict ready for JSON serialization."""
        return {"blockHash": self.block_id, "chainId": "eth", "blockNumber": self.block_num,
                "blockSize": self.block_size,
                "previous": self.previous, "trxDigest": self.trx_digest, "transactionsCount": self.trx_count,
                "trxamount": self.trx_amount, "trxfee": self.trx_fee,
                "createtime": datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
5b9fcea89591f2e56179d543a677ddc94c981056 | 2,448 | py | Python | tests/cmdline.py | robertschulze/dirsync | 5eae39e0e4a85d3d71c4690081b5f88e5efa9117 | [
"MIT"
] | 65 | 2016-02-23T17:25:51.000Z | 2022-03-31T01:03:41.000Z | tests/cmdline.py | robertschulze/dirsync | 5eae39e0e4a85d3d71c4690081b5f88e5efa9117 | [
"MIT"
] | 24 | 2015-01-07T09:51:28.000Z | 2022-02-11T15:03:57.000Z | tests/cmdline.py | robertschulze/dirsync | 5eae39e0e4a85d3d71c4690081b5f88e5efa9117 | [
"MIT"
] | 27 | 2015-01-07T09:12:02.000Z | 2022-02-08T20:27:00.000Z | """
Command line options tests
"""
import os
import re
from six import iteritems, StringIO
try:
# Python 3
from unittest.mock import patch
except ImportError:
from mock import patch
from dirsync.options import ArgParser
from dirsync.run import sync
from ._base import DirSyncTestCase
from . import trees
class CmdLineTests(DirSyncTestCase):
    """Base test case that drives dirsync through its CLI argument parser."""
    def dirsync(self, *args, **kwargs):
        """Run sync() with options parsed from argv-style `args`, merged with `kwargs`."""
        kwargs.update(vars(ArgParser().parse_args(args)))
        sync(**kwargs)
class SyncTests(CmdLineTests):
    """Command-line tests for the basic --sync action."""
    # Source tree fixture created before each test.
    init_trees = (('src', trees.simple),)
    def test_sync(self):
        """--sync with -c (create) mirrors src into a freshly created dst."""
        self.dirsync('src', 'dst', '--sync', '-c')
        self.assertIsFile('dst/file1.txt')
        self.assertIsDir('dst/dir')
        self.assertListDir('dst/dir', ['file4.txt'])
        self.assertIsDir('dst/empty_dir')
        self.assertListDir('dst/empty_dir', [])
    def test_no_action(self):
        """Running without any action option is an error."""
        with self.assertRaises(ValueError):
            self.dirsync('src', 'dst')
    def test_no_create(self):
        """--sync to a missing target without -c is an error."""
        with self.assertRaises(ValueError):
            self.dirsync('src', 'dst', '--sync')
    @patch('sys.stdout', new_callable=StringIO)
    def test_output(self, stdout):
        """Two consecutive syncs print the expected summary (durations masked)."""
        self.dirsync('src', 'dst', '--sync', '-c')
        self.dirsync('src', 'dst', '--sync', '-c')
        self.assertEqual(
            re.sub('\d\.\d{2}', 'X', stdout.getvalue().strip()),
            'dirsync finished in X seconds.\n'
            '3 directories parsed, 4 files copied\n'
            '3 directories were created.\n\n'
            'dirsync finished in X seconds.\n'
            '3 directories parsed, 0 files copied'
        )
class CfgFiles(CmdLineTests):
    """Tests for reading default options from a .dirsync config file."""
    # Source tree fixture created before each test.
    init_trees = (('src', trees.simple),)
    def mk_cfg_file(self, **options):
        """Write src/.dirsync with the given options in the [defaults] section."""
        cfg_file = open(os.path.join('src', '.dirsync'), 'w')
        cfg_file.write('[defaults]\n')
        for opt, val in iteritems(options):
            cfg_file.write('%s = %s\n' % (opt, str(val)))
        cfg_file.close()
    def test_sync_default(self):
        """Options from .dirsync apply without CLI flags; the cfg file itself is not copied."""
        self.mk_cfg_file(action='sync', create=True)
        self.dirsync('src', 'dst')
        self.assertIsFile('dst/file1.txt')
        self.assertIsDir('dst/dir')
        self.assertListDir('dst/dir', ['file4.txt'])
        self.assertIsDir('dst/empty_dir')
        self.assertListDir('dst/empty_dir', [])
        self.assertNotExists('dst/.dirsync')
5ba1947ace73c085c25f644e39fc215c1d92fdff | 8,562 | py | Python | ocean_provider/routes/decrypt.py | oceanprotocol/provider-service-py | 408a9032b30d3606a6b991f3982b7d17ded7cd47 | [
"Apache-2.0"
] | 1 | 2020-10-27T07:30:06.000Z | 2020-10-27T07:30:06.000Z | ocean_provider/routes/decrypt.py | oceanprotocol/provider-service-py | 408a9032b30d3606a6b991f3982b7d17ded7cd47 | [
"Apache-2.0"
] | 14 | 2020-05-28T11:50:18.000Z | 2020-10-26T09:51:49.000Z | ocean_provider/routes/decrypt.py | oceanprotocol/provider-service-py | 408a9032b30d3606a6b991f3982b7d17ded7cd47 | [
"Apache-2.0"
] | 2 | 2020-06-30T06:08:07.000Z | 2020-09-09T03:44:29.000Z | #
# Copyright 2021 Ocean Protocol Foundation
# SPDX-License-Identifier: Apache-2.0
#
import logging
import lzma
from hashlib import sha256
from typing import Optional, Tuple
from eth_typing.encoding import HexStr
from flask import Response, request
from flask_sieve import validate
from ocean_provider.requests_session import get_requests_session
from ocean_provider.user_nonce import update_nonce
from ocean_provider.utils.basics import (
get_config,
get_provider_wallet,
get_web3,
)
from ocean_provider.utils.data_nft import (
MetadataState,
get_metadata,
get_metadata_logs_from_tx_receipt,
)
from ocean_provider.utils.data_nft_factory import is_nft_deployed_from_factory
from ocean_provider.utils.encryption import do_decrypt
from ocean_provider.utils.error_responses import error_response
from ocean_provider.utils.util import get_request_data
from ocean_provider.validation.provider_requests import DecryptRequest
from web3.main import Web3
from . import services
# Module-level singletons shared by all requests handled by this blueprint.
provider_wallet = get_provider_wallet()
requests_session = get_requests_session()
logger = logging.getLogger(__name__)
@services.route("/decrypt", methods=["POST"])
@validate(DecryptRequest)
def decrypt():
    """Decrypts an encrypted document based on transaction Id or dataNftAddress.
    ---
    consumes:
      - application/json
    parameters:
      - name: decrypterAddress
        description: address of agent requesting decrypt
        type: string
        required: true
      - name: chainId
        description: chainId of the chain on which the encrypted document is stored
        type: int
        required: true
      - name: transactionId
        description: transaction Id where the document was created or last updated,
          required if dataNftAddress, encryptedDocument and flags parameters missing
        required: false
        type: string
      - name: dataNftAddress
        description: NFT address of the document,
          required if the transactionId parameter is missing
        required: false
        type: string
      - name: encryptedDocument
        description: encrypted document contents,
          required if the transactionId parameter is missing
        required: false
        type: string
      - name: flags
        description: encryption and compression flags,
          required if the transactionId parameter is missing
        required: false
        type: int
      - name: documentHash
        description: hash of the original document used for integrity check,
          required if the transactionId parameter is missing
        required: false
        type: int
      - name: nonce
        description: user nonce (timestamp)
        required: true
        type: decimal
      - name: signature
        description: user signature based on
          transactionId+dataNftAddress+decrypterAddress+chainId+nonce
        required: true
        type: string
    responses:
      201:
        description: decrypted document
      400:
        description: One or more of the required attributes are missing or invalid.
      503:
        description: Service Unavailable
    """
    data = get_request_data(request)
    logger.info(f"decrypt called. arguments = {data}")
    # Delegate to the testable implementation with parsed request fields.
    return _decrypt(
        decrypter_address=data.get("decrypterAddress"),
        chain_id=data.get("chainId"),
        transaction_id=data.get("transactionId"),
        data_nft_address=data.get("dataNftAddress"),
        encrypted_document=data.get("encryptedDocument"),
        flags=data.get("flags"),
        document_hash=data.get("documentHash"),
        nonce=data.get("nonce"),
    )
def _decrypt(
    decrypter_address: HexStr,
    chain_id: int,
    transaction_id: Optional[HexStr],
    data_nft_address: HexStr,
    encrypted_document: Optional[HexStr],
    flags: Optional[int],
    document_hash: Optional[HexStr],
    nonce: str,
) -> Response:
    """Validate the request, recover the payload, decrypt/decompress and return it.

    Validation chain: nonce update, chain id match, decrypter authorization,
    NFT factory provenance, metadata state. The payload comes either from the
    request arguments or from the on-chain metadata event of `transaction_id`.
    Returns a Flask Response: 201 with the plaintext, or a 4xx error_response.
    """
    update_nonce(decrypter_address, nonce)
    # Check if given chain_id matches Provider's chain_id
    web3 = get_web3()
    if web3.chain_id != chain_id:
        return error_response(f"Unsupported chain ID {chain_id}", 400, logger)
    # Check if decrypter is authorized
    authorized_decrypters = get_config().authorized_decrypters
    logger.info(f"authorized_decrypters = {authorized_decrypters}")
    if authorized_decrypters and decrypter_address not in authorized_decrypters:
        return error_response("Decrypter not authorized", 403, logger)
    if not is_nft_deployed_from_factory(web3, data_nft_address):
        return error_response(
            "Asset not deployed by the data NFT factory.", 400, logger
        )
    # Either take the document/flags/hash straight from the request, or
    # recover them from the metadata event logs of the given transaction.
    if not transaction_id:
        try:
            (encrypted_document, flags, document_hash) = _convert_args_to_bytes(
                encrypted_document, flags, document_hash
            )
        except Exception:
            return error_response("Failed to convert input args to bytes.", 400, logger)
    else:
        try:
            (
                encrypted_document,
                flags,
                document_hash,
            ) = _get_args_from_transaction_id(web3, transaction_id, data_nft_address)
        except Exception:
            return error_response("Failed to process transaction id.", 400, logger)
    logger.info(
        f"data_nft_address = {data_nft_address}, "
        f"encrypted_document as bytes = {encrypted_document}, "
        f"flags as bytes = {flags}, "
        f"document_hash as bytes = {document_hash}"
    )
    # Check if DDO metadata state is ACTIVE
    (_, _, metadata_state, _) = get_metadata(web3, data_nft_address)
    logger.info(f"metadata_state = {metadata_state}")
    if metadata_state in [MetadataState.ACTIVE, MetadataState.TEMPORARILY_DISABLED]:
        pass
    elif metadata_state == MetadataState.END_OF_LIFE:
        return error_response("Asset end of life", 403, logger)
    elif metadata_state == MetadataState.DEPRECATED:
        return error_response("Asset deprecated", 403, logger)
    elif metadata_state == MetadataState.REVOKED:
        return error_response("Asset revoked", 403, logger)
    else:
        return error_response("Invalid MetadataState", 400, logger)
    working_document = encrypted_document
    # bit 2: check if DDO is ecies encrypted
    if flags[0] & 2:
        try:
            working_document = do_decrypt(working_document, get_provider_wallet())
            logger.info("Successfully decrypted document.")
        except Exception:
            return error_response("Failed to decrypt.", 400, logger)
    else:
        logger.warning(
            "Document not encrypted (flags bit 2 not set). Skipping decryption."
        )
    # bit 1: check if DDO is lzma compressed
    if flags[0] & 1:
        try:
            working_document = lzma.decompress(working_document)
            logger.info("Successfully decompressed document.")
        except Exception:
            return error_response("Failed to decompress", 400, logger)
    document = working_document
    logger.info(f"document = {document}")
    # Verify checksum matches
    if sha256(document).hexdigest() != document_hash.hex():
        return error_response("Checksum doesn't match.", 400, logger)
    logger.info("Checksum matches.")
    response = Response(document, 201, {"Content-type": "text/plain"})
    logger.info(f"decrypt response = {response}")
    return response
def _convert_args_to_bytes(
    encrypted_document: HexStr, flags: int, document_hash: HexStr
) -> Tuple[bytes, bytes, bytes]:
    """Decode the request arguments into their byte representations.

    The hex strings become raw bytes; `flags` becomes a single big-endian byte.
    """
    document_bytes = Web3.toBytes(hexstr=encrypted_document)
    flags_byte = flags.to_bytes(1, "big")
    hash_bytes = Web3.toBytes(hexstr=document_hash)
    return document_bytes, flags_byte, hash_bytes
def _get_args_from_transaction_id(
    web3: Web3, transaction_id: HexStr, data_nft_address: HexStr
) -> Tuple[bytes, bytes, bytes]:
    """Get the MetadataCreated and MetadataUpdated logs from the transaction id.
    Parse the logs and return the encrypted_document, flags, and document_hash
    from the first matching event.
    """
    tx_receipt = web3.eth.get_transaction_receipt(transaction_id)
    logs = get_metadata_logs_from_tx_receipt(web3, tx_receipt, data_nft_address)
    logger.info(f"transaction_id = {transaction_id}, logs = {logs}")
    # Multiple metadata events in one tx are unexpected; fall back to the first.
    if len(logs) > 1:
        logger.warning(
            "More than 1 MetadataCreated/MetadataUpdated event detected. "
            "Using the event at index 0."
        )
    log = logs[0]
    return (log.args["data"], log.args["flags"], log.args["metaDataHash"])
| 35.234568 | 88 | 0.686872 | 0 | 0 | 0 | 0 | 2,524 | 0.294791 | 0 | 0 | 3,660 | 0.42747 |
5ba1b03217590cd51cf14bd304c81cdb93925ba6 | 79 | py | Python | aries_cloudagent/protocols/connections/v1_0/role.py | ankita-p17/aries-cloudagent-python | f849e360b0372fa58ffd7d043f39e3b81375570e | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/protocols/connections/v1_0/role.py | ankita-p17/aries-cloudagent-python | f849e360b0372fa58ffd7d043f39e3b81375570e | [
"Apache-2.0"
] | null | null | null | aries_cloudagent/protocols/connections/v1_0/role.py | ankita-p17/aries-cloudagent-python | f849e360b0372fa58ffd7d043f39e3b81375570e | [
"Apache-2.0"
] | null | null | null | from enum import Enum
class Role(Enum):
    """Roles played by agents in the connections protocol."""

    # NOTE: each member's value is a one-element tuple, preserved exactly
    # from the original definition (trailing comma inside the parentheses).
    AUTHOR = (1,)
    ENDORSER = (2,)
5ba512b3cc3c02047e366a68b92d00520b0293a3 | 2,699 | py | Python | src/cowrie/output/csirtg.py | uwacyber/cowrie | 1d81e1ca5d0b8461e06f17aee26cb7bf108a16e5 | [
"BSD-3-Clause"
] | 2,316 | 2015-06-25T18:51:02.000Z | 2022-03-29T23:31:16.000Z | src/cowrie/output/csirtg.py | uwacyber/cowrie | 1d81e1ca5d0b8461e06f17aee26cb7bf108a16e5 | [
"BSD-3-Clause"
] | 632 | 2015-07-08T19:02:05.000Z | 2022-03-20T03:20:56.000Z | src/cowrie/output/csirtg.py | uwacyber/cowrie | 1d81e1ca5d0b8461e06f17aee26cb7bf108a16e5 | [
"BSD-3-Clause"
] | 486 | 2015-08-12T05:02:45.000Z | 2022-03-27T19:54:02.000Z | from __future__ import annotations
import os
from datetime import datetime
from twisted.python import log
import cowrie.core.output
from cowrie.core.config import CowrieConfig
# Read the CSIRTG API token at import time; the fallback value doubles as a
# sentinel so an unconfigured token can be detected just below.
token = CowrieConfig.get("output_csirtg", "token", fallback="a1b2c3d4")
if token == "a1b2c3d4":
    log.msg("output_csirtg: token not found in configuration file")
    exit(1)

# csirtgsdk reads CSIRTG_TOKEN from the environment when it is imported,
# so the variable must be exported *before* the import below.
os.environ["CSIRTG_TOKEN"] = token

import csirtgsdk  # noqa: E402
class Output(cowrie.core.output.Output):
    """
    Forward Cowrie connection events to the CSIRTG aggregation service.
    """

    def start(self):
        """
        Start the output module.

        Note that csirtsdk is imported here because it reads CSIRTG_TOKEN on import
        Cowrie sets this environment variable.
        """
        self.user = CowrieConfig.get("output_csirtg", "username")
        self.feed = CowrieConfig.get("output_csirtg", "feed")
        self.description = CowrieConfig.get("output_csirtg", "description")
        self.debug = CowrieConfig.getboolean("output_csirtg", "debug", fallback=False)
        # Per-day de-duplication cache: {date-string: {"ip,system", ...}}
        self.context = {}

    def stop(self):
        pass

    def write(self, e):
        """
        Only pass on connection events
        """
        if e["eventid"] == "cowrie.session.connect":
            self.submitIp(e)

    def submitIp(self, e):
        """Submit the event's source IP to CSIRTG, at most once per day."""
        src_ip = e["src_ip"]
        timestamp = e["timestamp"]
        sensor = e.get("system", None)

        if sensor not in (
            "cowrie.ssh.factory.CowrieSSHFactory",
            "cowrie.telnet.transport.HoneyPotTelnetFactory",
        ):
            return

        today = str(datetime.now().date())
        if not self.context.get(today):
            # First event of a new day: drop the previous day's cache.
            self.context = {}
            self.context[today] = set()

        cache_key = ",".join([src_ip, sensor])
        if cache_key in self.context[today]:
            return
        self.context[today].add(cache_key)

        if sensor == "cowrie.telnet.transport.HoneyPotTelnetFactory":
            tags, port = "scanner,telnet", 23
        else:
            tags, port = "scanner,ssh", 22

        payload = {
            "user": self.user,
            "feed": self.feed,
            "indicator": src_ip,
            "portlist": port,
            "protocol": "tcp",
            "tags": tags,
            "firsttime": timestamp,
            "lasttime": timestamp,
            "description": self.description,
        }
        if self.debug is True:
            log.msg(f"output_csirtg: Submitting {payload!r} to CSIRTG")

        ind = csirtgsdk.indicator.Indicator(payload).submit()
        if self.debug is True:
            log.msg(f"output_csirtg: Submitted {ind!r} to CSIRTG")
        log.msg("output_csirtg: submitted to csirtg at {} ".format(ind["location"]))
| 27.262626 | 86 | 0.577251 | 2,274 | 0.842534 | 0 | 0 | 0 | 0 | 0 | 0 | 982 | 0.363838 |
5ba5c6b926abcc27d77eb56e1ab7d35c7c997288 | 1,355 | py | Python | gui.py | Skezzowski/Rock-Paper-Scissors-Recognizer | 1e3122c648d5eedef8909b75825021de590c6fdc | [
"MIT"
] | null | null | null | gui.py | Skezzowski/Rock-Paper-Scissors-Recognizer | 1e3122c648d5eedef8909b75825021de590c6fdc | [
"MIT"
] | null | null | null | gui.py | Skezzowski/Rock-Paper-Scissors-Recognizer | 1e3122c648d5eedef8909b75825021de590c6fdc | [
"MIT"
] | null | null | null | import cv2
from PIL import Image, ImageTk
from appJar import gui
from segmentation import segment_hand_with_background
import recognize as rec
import skeleton as sk
def opencv_image_to_appjar_image(image):
    """Convert a BGR OpenCV image into a Tk PhotoImage usable by appJar."""
    blue, green, red = cv2.split(image)
    rgb_image = Image.fromarray(cv2.merge((red, green, blue)))
    return ImageTk.PhotoImage(rgb_image)
def submit(btn):
    """appJar callback: segment the chosen image, show it and classify it."""
    file_path = app.getEntry("f1")
    if file_path == "":
        return

    img = cv2.imread(file_path)
    segmented = segment_hand_with_background(img)
    cv2.imshow('teszt', segmented)
    app.reloadImageData("pic", opencv_image_to_appjar_image(img), fmt="PhotoImage")

    skeleton = sk.skeleton_of_shape(segmented)

    # Overlay the skeleton on the segmented silhouette: the blue and green
    # channels are masked out where the skeleton is set, leaving it red.
    b, g, r = cv2.split(cv2.cvtColor(segmented, cv2.COLOR_GRAY2BGR))
    b = cv2.bitwise_and(b, ~skeleton)
    g = cv2.bitwise_and(g, ~skeleton)
    cv2.imshow('osszegezve', cv2.merge((b, g, r)))

    app.setLabel("result", rec.recognize(segmented, skeleton))
# Build the appJar window: a title label, a file chooser and a submit button.
app = gui("RPS Recognizer")
app.setStretch("none")
app.addLabel("Rock-Paper-Scissors Recognizer")
app.addFileEntry("f1")
app.addButton("Submit", submit)

# default image
# NOTE(review): unlike opencv_image_to_appjar_image above, this path does not
# swap BGR to RGB, so the default image may display with wrong colours.
image = cv2.imread("images/testing/testing-3.png")
im = Image.fromarray(image)
imtk = ImageTk.PhotoImage(im)
app.addImageData("pic", imtk, fmt="PhotoImage")
app.addLabel("result", "fa")

# Enter the appJar event loop (blocks until the window closes).
app.go()
| 26.568627 | 87 | 0.684133 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 191 | 0.140959 |
5ba646458e889ae3b1ad2c81a1b4ddcbd8846e71 | 1,347 | py | Python | eval/HighlightBins/hist.py | mkirsche/sapling | 4bbe08ecabd2d7d05f0c7ad1369fef7d0de8cc85 | [
"MIT"
] | 20 | 2018-10-18T16:42:50.000Z | 2022-01-12T13:13:09.000Z | eval/HighlightBins/hist.py | mkirsche/sapling | 4bbe08ecabd2d7d05f0c7ad1369fef7d0de8cc85 | [
"MIT"
] | 1 | 2020-11-26T08:07:30.000Z | 2020-12-02T05:41:25.000Z | eval/HighlightBins/hist.py | mkirsche/sapling | 4bbe08ecabd2d7d05f0c7ad1369fef7d0de8cc85 | [
"MIT"
] | 6 | 2020-01-29T21:59:55.000Z | 2022-03-31T23:38:01.000Z | import sys
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns

fn = sys.argv[1]
pal = sns.color_palette()

with open(fn) as f:
    # Per-bin accumulators, sized by the header record.
    toPlot = []
    names = []
    goodness = []
    xs = []
    ys = []
    ps = []
    sns.set()
    for line in f:
        tokens = line.split(' ')
        if len(tokens) == 1:
            # Header record: a single integer giving the number of bins.
            for _ in range(int(tokens[0])):
                toPlot.append([])
                xs.append([])
                ys.append([])
                ps.append([])
                names.append('')
                goodness.append(0)
        else:
            # Data record: bin id, plot index, value, goodness, x, y, p
            # (format inferred from this parser — confirm against producer).
            plotNum = int(tokens[1])
            toPlot[plotNum].append(int(tokens[2]))
            xs[plotNum].append(int(tokens[4]))
            ys[plotNum].append(int(tokens[5]))
            ps[plotNum].append(int(tokens[6]))
            names[plotNum] = str(int(tokens[0]))
            goodness[plotNum] = int(tokens[3])

    # One scatter/line figure per bin, written under figures/.
    for i in range(len(toPlot)):
        sns.lineplot(x=xs[i], y=ys[i], color=pal[0])
        sns.lineplot(x=xs[i], y=ps[i], color=pal[2])
        plt.title('bin ' + names[i])
        plt.savefig('figures/binScatter' + str(i + 1) + '.png')
        plt.cla()
        plt.clf()
        plt.close()
| 25.415094 | 61 | 0.55605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 211 | 0.156644 |
5ba693f02b6eec231bd51eee1c8015fbc38f46e9 | 2,882 | py | Python | obj_sys/obj_tools.py | weijia/obj_sys | 7654a84f155f8e0da942f980d06c6ada34a6d71e | [
"BSD-3-Clause"
] | null | null | null | obj_sys/obj_tools.py | weijia/obj_sys | 7654a84f155f8e0da942f980d06c6ada34a6d71e | [
"BSD-3-Clause"
] | null | null | null | obj_sys/obj_tools.py | weijia/obj_sys | 7654a84f155f8e0da942f980d06c6ada34a6d71e | [
"BSD-3-Clause"
] | null | null | null | import socket
import logging
from ufs_tools import format_path
def get_fs_protocol_separator():
    """Return the protocol/path separator, preferring site configuration.

    Falls back to the conventional "://" when the optional
    configurationTools module is not installed.
    """
    try:
        import configurationTools
        return configurationTools.getFsProtocolSeparator()
    except ImportError:
        return "://"
# Prefix of every UFS object URL, e.g. u'ufs://'.
gUfsObjUrlPrefix = u'ufs' + get_fs_protocol_separator()
# Separator between the hostname and the path inside a UFS URL.
gUfsObjUrlSeparator = u'/'
log = logging.getLogger(__name__)
def is_web_url(url):
    """Return True when *url* uses a common web protocol (https/http/ftp)."""
    if not is_ufs_url(url):
        return False
    return get_protocol(url) in ("https", "http", "ftp")
def get_protocol(url):
    """Return the protocol component of *url* (the part before '://')."""
    return parse_url(url)[0]
def get_formatted_full_path(full_path):
    """Return *full_path* normalised by ufs_tools.format_path."""
    return format_path(full_path)
def parse_url(url):
    """Split *url* on the protocol separator into its components.

    NOTE(review): maxsplit is 2, so a URL containing the separator twice
    yields three parts; get_url_content unpacks exactly two values and
    would then fail — confirm whether maxsplit=1 was intended.
    """
    return url.split(get_fs_protocol_separator(), 2)
def get_hostname():
    """Return the local hostname as a unicode string (Python 2 idiom)."""
    return unicode(socket.gethostname())
def get_ufs_url_for_local_path(full_path):
    """Build the ufs://<hostname>/<path> URL for a path on this machine."""
    return gUfsObjUrlPrefix + get_hostname() + gUfsObjUrlSeparator + format_path(full_path)
def get_full_path_from_ufs_url(ufs_url):
    """Return the local filesystem path encoded in a ufs:// URL.

    Raises ValueError when *ufs_url* is not a UFS URL or points at a
    different host.
    """
    if not is_ufs_fs(ufs_url):
        # BUG FIX: raising a plain string is a TypeError at raise time;
        # use a real exception type instead.
        raise ValueError("not ufs url")
    objPath = parse_url(ufs_url)[1]
    hostname, full_path = objPath.split(gUfsObjUrlSeparator, 1)
    if unicode(hostname) != get_hostname():
        raise ValueError('not a local file')
    return full_path
def get_full_path_for_local_os(ufs_url):
    """Return the path portion of *ufs_url*, adjusted for the local OS."""
    path = parse_url(ufs_url)[1]
    # The path returned by qt is file:///d:/xxxx, so we must remove the '/' char first
    if path[0] == '/':
        return path[1:]
    return path
def is_uuid(url):
    """Return True when *url* is a uuid:// style URL."""
    return url.startswith(u"uuid" + get_fs_protocol_separator())
def get_url_content(url):
    """Return everything after the protocol separator of *url*."""
    _protocol, content = parse_url(url)
    return content
def get_path_for_ufs_url(url):
    """Return the path (everything after the host) of a UFS URL."""
    host_and_path = get_url_content(url)
    return host_and_path.split(gUfsObjUrlSeparator, 1)[1]
def get_uuid(url):
    """Return the uuid component of a uuid:// URL."""
    return get_url_content(url)
def get_url_for_uuid(id):
    """Return the uuid:// URL for the given identifier.

    NOTE: the parameter name shadows the builtin ``id``; kept as-is for
    backward compatibility with keyword callers.
    """
    return u"uuid" + get_fs_protocol_separator() + id
def is_ufs_url(url):
    """
    In format of xxxx://xxxx
    :param url: URL string to test.
    :return: True when *url* contains a protocol separator.
    """
    # A simple membership test replaces the verbose find() == -1 branching.
    return get_fs_protocol_separator() in url
def get_ufs_local_root_url():
    """Return the UFS URL of this host's root, e.g. u'ufs://<hostname>/'."""
    return gUfsObjUrlPrefix + get_hostname() + gUfsObjUrlSeparator
def is_ufs_fs(url):
    """Return True when *url* starts with the UFS protocol prefix."""
    return url.startswith(gUfsObjUrlPrefix)
def get_ufs_basename(url):
    """Return the last path component of *url* (text after the final '/')."""
    return url.rsplit(gUfsObjUrlSeparator, 1)[1]
def get_host(ufs_url):
    """Return the hostname component of a ufs:// URL.

    Raises ValueError when *ufs_url* is not a UFS URL.
    """
    if is_ufs_fs(ufs_url):
        path_with_host = parse_url(ufs_url)[1]
        return path_with_host.split(u"/")[0]
    # BUG FIX: raising a plain string is a TypeError at raise time;
    # use a real exception type instead.
    raise ValueError("Not Ufs URL")
def is_local(ufs_url):
    """
    ufs_url in format ufs://hostname/D:/tmp/xxx.xxx
    """
    # True when the URL's host component matches this machine's hostname.
    if get_host(ufs_url) == get_hostname():
        return True
    else:
        # Python 2 print statement (debug trace); left untouched.
        print "not local", get_host(ufs_url), get_hostname()
        return False
| 22 | 91 | 0.683206 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 353 | 0.122484 |
5ba72256a6d0dd8def446e04a42967abcf78e13a | 935 | py | Python | django_pandas/tests/models.py | patseng/django-pandas | 289c9bd5e245551f932a28c9dd185ff1e82346d3 | [
"BSD-3-Clause"
] | null | null | null | django_pandas/tests/models.py | patseng/django-pandas | 289c9bd5e245551f932a28c9dd185ff1e82346d3 | [
"BSD-3-Clause"
] | null | null | null | django_pandas/tests/models.py | patseng/django-pandas | 289c9bd5e245551f932a28c9dd185ff1e82346d3 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from django_pandas.managers import DataFrameManager
class DataFrame(models.Model):
    """Test model: a one-char index column plus four numeric value columns."""

    index = models.CharField(max_length=1)
    col1 = models.IntegerField()
    col2 = models.FloatField()
    col3 = models.FloatField()
    col4 = models.IntegerField()

    objects = DataFrameManager()

    def __unicode__(self):
        # BUG FIX: the format string had four placeholders for five
        # arguments, silently dropping col4 from the representation.
        return "{} {} {} {} {}".format(
            self.index,
            self.col1,
            self.col2,
            self.col3,
            self.col4
        )
class FlatTimeSeries(models.Model):
    """Test model: a datetime index column plus four float value columns."""

    date_ix = models.DateTimeField()
    col1 = models.FloatField()
    col2 = models.FloatField()
    col3 = models.FloatField()
    col4 = models.FloatField()

    objects = DataFrameManager()

    def __unicode__(self):
        # BUG FIX: the format string had four placeholders for five
        # arguments, silently dropping col4 from the representation.
        return "{} {} {} {} {}".format(
            self.date_ix,
            self.col1,
            self.col2,
            self.col3,
            self.col4
        )
| 22.261905 | 51 | 0.574332 | 848 | 0.906952 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.027807 |
5ba7d313b7cd41420e9e5febeab4e32d8628ee1f | 2,304 | py | Python | src/apps/recommendations/tests/x_test_comparable_inventories.py | Remy-TPP/q-api | 761dd2d15557cb9a8bdc0b397fc10e0cf7c95c03 | [
"MIT"
] | null | null | null | src/apps/recommendations/tests/x_test_comparable_inventories.py | Remy-TPP/q-api | 761dd2d15557cb9a8bdc0b397fc10e0cf7c95c03 | [
"MIT"
] | 14 | 2020-06-23T20:32:16.000Z | 2022-01-13T03:13:20.000Z | src/apps/recommendations/tests/x_test_comparable_inventories.py | MartinCura/q-api | 761dd2d15557cb9a8bdc0b397fc10e0cf7c95c03 | [
"MIT"
] | null | null | null | from unittest import TestCase
from apps.profiles.models import Profile
from apps.recipes.models import Recipe, Ingredient
from apps.recommendations.utils import ComparableInventory
# TODO: written for manual testing with preloaded db; for general use should create resources in setUp()
class ComparableInventoryTest(TestCase):
    """Manual tests for ComparableInventory.

    NOTE(review): depends on a preloaded database (Profile pk=3, product
    id 329, recipes 291/313) per the TODO above; not suitable for CI as-is.
    """
    def setUp(self):
        # Wrap the first place's inventory of a fixed, preloaded profile.
        prof = Profile.objects.get(pk=3)
        place = prof.places.first()
        inv = place.inventory.all().prefetch_related('product', 'unit')
        try:
            # Debug output; self.inv only exists once a previous setUp ran,
            # hence the AttributeError guard on the first run.
            print('creating')
            print(f'---- {self.inv}')
            self.inv.print_inventory(product_id=329)
        except AttributeError:
            pass
        self.inv = ComparableInventory(inv)
        self.inv.print_inventory(product_id=329)
    def tearDown(self):
        print('destroying')
        self.inv.destroy()
        self.inv.print_inventory()
        self.inv = None
        print('destroyed')
    def test_print(self):
        # Smoke test: printing the wrapped inventory must not raise.
        self.inv.print_inventory(product_id=329)
    def test_substract_ingredient(self):
        # Product ID 329: pepino
        ing = Ingredient.objects.filter(product_id=329)[0]
        print(ing)
        self.inv.print_inventory(product_id=329)
        self.inv.substract(ing)
        print(self.inv.inventory.get(329))
        self.inv.print_inventory(product_id=329)
    def test_reset(self):
        # Quantity decreases with each subtraction and reset() restores it.
        ing = Ingredient.objects.filter(product_id=329)[0]
        self.assertEqual(self.inv.get(329).quantity, 3)
        self.inv.substract(ing)
        self.assertEqual(self.inv.get(329).quantity, 2)
        self.inv.substract(ing)
        self.assertEqual(self.inv.get(329).quantity, 1)
        self.inv.reset()
        self.assertEqual(self.inv.get(329).quantity, 3)
    def test_can_make_recipe(self):
        # Shouldn't be able to do this
        recipe1 = Recipe.objects.get(pk=313)
        self.assertFalse(self.inv.can_make(recipe1))
        # Should be able to make this one
        recipe2 = Recipe.objects.get(pk=291)
        self.assertTrue(self.inv.can_make(recipe2))
    def test_can_make_multiple_times(self):
        # Repeated can_make calls should keep succeeding (the check must
        # not consume the inventory).
        recipe = Recipe.objects.get(pk=291)
        self.assertTrue(self.inv.can_make(recipe))
        self.assertTrue(self.inv.can_make(recipe))
        self.assertTrue(self.inv.can_make(recipe))
| 32 | 104 | 0.656684 | 2,014 | 0.874132 | 0 | 0 | 0 | 0 | 0 | 0 | 257 | 0.111545 |
5ba7dd577c3e8828d8289625c9be21e83ca75ece | 2,378 | py | Python | api/migrations/0076_auto_20200728_1500.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | 11 | 2018-06-11T06:05:12.000Z | 2022-03-25T09:31:44.000Z | api/migrations/0076_auto_20200728_1500.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | 498 | 2017-11-07T21:20:13.000Z | 2022-03-31T14:37:18.000Z | api/migrations/0076_auto_20200728_1500.py | IFRCGo/ifrcgo-api | c1c3e0cf1076ab48d03db6aaf7a00f8485ca9e1a | [
"MIT"
] | 6 | 2018-04-11T13:29:50.000Z | 2020-07-16T16:52:11.000Z | # Generated by Django 2.2.13 on 2020-07-28 15:00
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated schema change: removes every epidemic case-count /
    # death-count column (cases, probable/suspected/confirmed variants and
    # the health_min_*, other_* and who_* source-specific counterparts)
    # from the FieldReport model.

    dependencies = [
        ('api', '0075_profile_last_frontend_login'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='fieldreport',
            name='cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='confirmed_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='health_min_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='health_min_confirmed_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='health_min_num_dead',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='health_min_probable_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='health_min_suspected_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='other_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='other_confirmed_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='other_probable_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='other_suspected_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='probable_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='suspected_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='who_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='who_confirmed_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='who_num_dead',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='who_probable_cases',
        ),
        migrations.RemoveField(
            model_name='fieldreport',
            name='who_suspected_cases',
        ),
    ]
| 27.651163 | 52 | 0.543314 | 2,292 | 0.963835 | 0 | 0 | 0 | 0 | 0 | 0 | 668 | 0.280908 |
5ba8bd99382a6bd8f9062e85811ea5bbc00b9220 | 345 | py | Python | main.py | alextremblay962/Hydropinic_System | 7fcc87f110425183fb8e18c7f7c6664781565f65 | [
"MIT"
] | null | null | null | main.py | alextremblay962/Hydropinic_System | 7fcc87f110425183fb8e18c7f7c6664781565f65 | [
"MIT"
] | null | null | null | main.py | alextremblay962/Hydropinic_System | 7fcc87f110425183fb8e18c7f7c6664781565f65 | [
"MIT"
] | null | null | null | import serial
import json
import io
import time

# Open the microcontroller's serial port (Windows COM24) at 9600 baud.
ser = serial.Serial("COM24" , 9600, timeout=2)
topic = "hydro/light1"
payload = 1
#data = json.dumps({"topic":topic,"payload":payload})
data = "{\"topic\":\"hydro/light1\",\"payload\":1}"
data = data.encode()
print(data)
# NOTE(review): the JSON payload built above is never written to the port;
# only the single byte b'A' is sent — confirm which protocol is intended.
ser.write(b'A')
hello = ser.readline()#.decode("ascii")
print(hello) | 16.428571 | 53 | 0.669565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 139 | 0.402899 |
5ba9e26eca34452a68418484aba93e32e621f681 | 366 | py | Python | level_3/challenge_2.py | mouse-reeve/foobar | 16b66b2bf50f612d2a2b72a3997fb954cff72a0b | [
"MIT"
] | null | null | null | level_3/challenge_2.py | mouse-reeve/foobar | 16b66b2bf50f612d2a2b72a3997fb954cff72a0b | [
"MIT"
] | 1 | 2015-10-15T21:53:15.000Z | 2016-03-04T18:31:37.000Z | level_3/challenge_2.py | mouse-reeve/foobar | 16b66b2bf50f612d2a2b72a3997fb954cff72a0b | [
"MIT"
] | null | null | null | ''' Compute a digest message '''
def answer(digest):
    '''Invert the digest and recover the original message.

    Each digest byte satisfies 129 * m[i] = 256 * a + (digest[i] ^ m[i-1])
    for some non-negative integer a (m[-1] taken as 0); this solves for
    the smallest such a using exact integer arithmetic, avoiding the
    float-precision failures of the original /129.0 search for large
    intermediate values.
    '''
    message = []
    for i, v in enumerate(digest):
        pv = message[i - 1] if i > 0 else 0
        x = v ^ pv
        # Smallest a >= 0 making 256*a + x divisible by 129.
        a = 0
        while (256 * a + x) % 129 != 0:
            a += 1
        message.append((256 * a + x) // 129)
    return message
| 20.333333 | 46 | 0.42623 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.147541 |
5bac7ae6136379e32a79e0f45ffc9c4c50e02c35 | 274 | py | Python | pacote-download/pythonProject/exercicios_python_guanabara/ex019.py | oliveirajonathas/python_estudos | 28921672d7e5d0866030c45b077a28998905f752 | [
"MIT"
] | null | null | null | pacote-download/pythonProject/exercicios_python_guanabara/ex019.py | oliveirajonathas/python_estudos | 28921672d7e5d0866030c45b077a28998905f752 | [
"MIT"
] | null | null | null | pacote-download/pythonProject/exercicios_python_guanabara/ex019.py | oliveirajonathas/python_estudos | 28921672d7e5d0866030c45b077a28998905f752 | [
"MIT"
] | null | null | null | import random
# Read the four students' names interactively.
aluno1 = input('Nome aluno 1: ')
aluno2 = input('Nome aluno 2: ')
aluno3 = input('Nome aluno 3: ')
aluno4 = input('Nome aluno 4: ')
# Draw one of them at random to erase the board.
sorteado = random.choice([aluno1, aluno2, aluno3, aluno4])
print('O sorteado para apagar o quadro foi: {}'.format(sorteado))
| 24.909091 | 65 | 0.686131 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.383212 |
5badd82fc3129e47361d6ac0510385c34b87a716 | 306 | py | Python | tests/test_reset_plot.py | l-johnston/toolbag | 1bd6ca61bfaf5856e5de320926d5593291e39e9c | [
"MIT"
] | null | null | null | tests/test_reset_plot.py | l-johnston/toolbag | 1bd6ca61bfaf5856e5de320926d5593291e39e9c | [
"MIT"
] | null | null | null | tests/test_reset_plot.py | l-johnston/toolbag | 1bd6ca61bfaf5856e5de320926d5593291e39e9c | [
"MIT"
] | null | null | null | """Test reset_plot"""
import matplotlib.pyplot as plt
from toolbag import reset_plot
plt.ion()
def test_reset_plot():
    """After reset_plot, the figure's current axes is still the original one."""
    figure, axes = plt.subplots()
    axes.plot([1, 2, 3])
    plt.close()
    reset_plot(figure)
    assert figure.gca() is axes
    plt.close()
| 20.4 | 46 | 0.656863 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 67 | 0.218954 |
5bade8022eec80f2a26ec5d44c9c7347683447d6 | 330 | py | Python | af/shovel/oonipl/popen.py | mimi89999/pipeline | 3e9eaf74c0966df907a230fbe89407c2bbc3d930 | [
"BSD-3-Clause"
] | null | null | null | af/shovel/oonipl/popen.py | mimi89999/pipeline | 3e9eaf74c0966df907a230fbe89407c2bbc3d930 | [
"BSD-3-Clause"
] | null | null | null | af/shovel/oonipl/popen.py | mimi89999/pipeline | 3e9eaf74c0966df907a230fbe89407c2bbc3d930 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
from subprocess import Popen, PIPE
from contextlib import contextmanager
@contextmanager
def ScopedPopen(*args, **kwargs):
    """Context manager around Popen that kills the child on scope exit.

    The kill is best-effort: any failure (e.g. the process has already
    exited) is silently ignored.
    """
    process = Popen(*args, **kwargs)
    try:
        yield process
    finally:
        try:
            process.kill()
        except Exception:
            pass
| 18.333333 | 37 | 0.593939 | 0 | 0 | 188 | 0.569697 | 204 | 0.618182 | 0 | 0 | 47 | 0.142424 |
5baf5da6b3b56130ae127a49e7d55ba199f66090 | 3,104 | py | Python | software/dsp/ddc.py | loxodes/phasenoise | 21dc8cd6ca167c215bd5e2b579d65062c7bdc848 | [
"MIT"
] | 14 | 2020-09-20T18:38:18.000Z | 2021-08-06T09:17:19.000Z | software/dsp/ddc.py | loxodes/phasenoise | 21dc8cd6ca167c215bd5e2b579d65062c7bdc848 | [
"MIT"
] | null | null | null | software/dsp/ddc.py | loxodes/phasenoise | 21dc8cd6ca167c215bd5e2b579d65062c7bdc848 | [
"MIT"
] | 1 | 2021-02-06T13:46:16.000Z | 2021-02-06T13:46:16.000Z | from migen import *
from nco import NCO
from mixer import Mixer
from cic import CIC, CompensationFilter
import numpy as np
import matplotlib.pyplot as plt
class DDC(Module):
    """Digital down-converter: NCO + quadrature mixer + per-arm CIC filters.

    The input sample stream is mixed against the NCO's I/Q outputs; each
    arm is filtered by its own CIC, and the top ``output_bits`` bits of the
    CIC results are presented on ``o_i``/``o_q`` when ``o_valid`` is set.
    """
    def __init__(self, input_bits = 12, output_bits = 16, phaseinc_bits = 18, nco_bits = 18, if_bits = 20):
        # External interface signals (a (width, True) tuple marks signed).
        self.i_sample = Signal((input_bits, True))
        self.o_i = Signal((output_bits, True))
        self.o_q = Signal((output_bits, True))
        self.o_valid = Signal()
        # NOTE(review): i_nco_freq is declared but never wired below —
        # the testbench drives dut.nco.i_phase_inc directly; confirm intent.
        self.i_nco_freq = Signal(phaseinc_bits)

        # Keep the bit-width configuration around for introspection.
        self.input_bits = input_bits
        self.output_bits = output_bits
        self.phase_bits = phaseinc_bits
        self.if_bits = if_bits
        self.nco_bits = nco_bits

        # Submodules: oscillator, quadrature mixer and one CIC per arm.
        self.nco = nco = NCO(output_bits = nco_bits, phaseinc_bits = phaseinc_bits)
        self.mixer = mixer = Mixer(sample_bits = input_bits, nco_bits = nco_bits, output_bits = if_bits)
        self.cic_i = cic_i = CIC(input_bits = if_bits)
        self.cic_q = cic_q = CIC(input_bits = if_bits)

        # Combinatorial wiring: sample -> mixer -> CICs -> outputs.
        # The slice keeps the most significant output_bits of each CIC result.
        self.comb += [
            mixer.i_sample.eq(self.i_sample),
            mixer.i_nco_i.eq(nco.o_nco_i),
            mixer.i_nco_q.eq(nco.o_nco_q),
            cic_i.i_sample.eq(mixer.o_i),
            cic_q.i_sample.eq(mixer.o_q),
            self.o_valid.eq(cic_i.o_valid),
            self.o_i.eq(cic_i.o_result[(cic_i.filter_bits - output_bits):]),
            self.o_q.eq(cic_q.o_result[(cic_q.filter_bits - output_bits):])]
        self.submodules += [nco, mixer, cic_i, cic_q,]

        # Compensation FIR is not wired in yet.
        #comp_fir = CompensationFilter()
        #self.submodules += comp_fir
def ddc_test(dut):
    """Migen testbench generator: feed a sine tone through the DDC and plot.

    Drives a 5 MHz tone (sampled at 25 MHz) with the NCO tuned to -4.9 MHz,
    so the mixed product lands near 100 kHz, then plots the NCO, mixer and
    decimated I/Q outputs.
    """
    f_s = 25e6
    f_in = 5e6
    f_nco = -4.9e6
    duration = .0001
    t_s = 1/f_s
    t = np.arange(0,duration,t_s)
    # Input tone scaled to +/- 2**10 counts.
    s_in = np.sin(2 * np.pi * f_in * t) * (2 ** 10)
    phase_inc = dut.nco.calc_phase_inc(f_s, f_nco)
    yield dut.nco.i_phase_inc.eq(phase_inc)
    i_results = []
    q_results = []
    mixer_out = []
    nco_out = []
    for s_i in s_in:
        # Apply one input sample per simulation clock.
        yield dut.i_sample.eq(int(s_i))
        # NOTE(review): m_i samples dut.o_i (the DDC output), not the mixer
        # output, despite being collected into mixer_out — confirm intent.
        m_i = (yield dut.o_i)
        n_i = (yield dut.nco.o_nco_i)
        mixer_out.append(m_i)
        nco_out.append(n_i)
        # Decimated outputs are captured only when the CIC marks them valid.
        if (yield dut.o_valid):
            i_result = (yield dut.o_i)
            q_result = (yield dut.o_q)
            i_results.append(i_result)
            q_results.append(q_result)
        yield
    nco_out = np.array(nco_out)
    mixer_out = np.array(mixer_out)
    # NOTE(review): synth_mixer is computed but never used below.
    synth_mixer = nco_out * s_in
    i_results = np.array(i_results)
    q_results = np.array(q_results)
    # Combine the arms into a complex signal and remove the DC offset.
    s_results = i_results + 1j * q_results
    s_results = s_results - np.mean(s_results)
    plt.subplot(3,1,1)
    plt.plot(nco_out)
    plt.plot(mixer_out)
    plt.title('NCO and mixer output')
    plt.subplot(3,1,2)
    plt.plot(np.real(s_results))
    plt.plot(np.imag(s_results))
    plt.title('real and imag output of ddc')
    plt.subplot(3,1,3)
    plt.magnitude_spectrum(s_results, f_s/8, scale='dB', alpha=.5)
    plt.title('ddc output spectrum')
    plt.show()
if __name__ == '__main__':
    # Build the DDC with default bit widths and run the migen simulation,
    # dumping waveforms to ddc_test.vcd.
    dut = DDC()
    run_simulation(dut, ddc_test(dut), vcd_name="ddc_test.vcd")
| 27.22807 | 107 | 0.615979 | 1,409 | 0.45393 | 1,410 | 0.454253 | 0 | 0 | 0 | 0 | 160 | 0.051546 |
5baf938ce58015edbc41bd4ae801eee8162b79c1 | 2,282 | py | Python | profileparser.py | JimKnowler/profile-visualiser | 2398b17c68ea748fc82e7cc15e43ccbfb64f8e2c | [
"MIT"
] | 3 | 2018-06-19T16:23:35.000Z | 2021-07-15T05:35:21.000Z | profileparser.py | JimKnowler/profile-visualiser | 2398b17c68ea748fc82e7cc15e43ccbfb64f8e2c | [
"MIT"
] | null | null | null | profileparser.py | JimKnowler/profile-visualiser | 2398b17c68ea748fc82e7cc15e43ccbfb64f8e2c | [
"MIT"
] | null | null | null | class ProfileParser:
    def __init__(self, consumer):
        # The consumer receives one callback per parsed record
        # (on_thread, on_function, on_sample_start, on_counter_value, ...).
        self._consumer = consumer
    def load_file(self, filename):
        """Parse *filename* line by line, reporting and re-raising any error."""
        with open(filename, "r") as file:
            for line_number, line in enumerate(file):
                try:
                    line = line.rstrip()
                    self.parse(line)
                except Exception as e:
                    # Python 2 print statements: surface the offending line
                    # and its number before propagating the failure.
                    print "exception while parsing line ", line_number
                    print ">> line: [", line, "]"
                    print ">>", e
                    raise e
    def parse(self, line):
        """Dispatch a single space-separated profile record to the consumer.

        The first token selects the record type: T thread definition,
        F function definition, S sample start, E sample end, V event
        definition, Y event emission, C counter definition, D counter value.
        """
        if line.startswith('#'):
            # ignore comment lines
            return
        split_line = line.split(' ',1)
        line_type = split_line[0]
        if line_type == 'T':
            # T <thread_id> <thread_label>
            split_line = line.split(' ',2)
            thread_id = int(split_line[1])
            thread_label = split_line[2]
            self._consumer.on_thread(thread_id, thread_label)
        elif line_type == 'F':
            # F <thread_id> <function_id> <function_label>
            split_line = line.split(' ',3)
            thread_id = int(split_line[1])
            function_id = int(split_line[2])
            function_label = split_line[3]
            self._consumer.on_function(thread_id, function_id, function_label)
        elif line_type == 'S':
            # S <thread_id> <function_id> <time> - function entered
            split_line = line.split(' ',3)
            thread_id = int(split_line[1])
            function_id = int(split_line[2])
            time = int(split_line[3])
            self._consumer.on_sample_start(thread_id, function_id, time)
        elif line_type == 'E':
            # E <thread_id> <function_id> <time> - function exited
            split_line = line.split(' ',3)
            thread_id = int(split_line[1])
            function_id = int(split_line[2])
            time = int(split_line[3])
            self._consumer.on_sample_finish(thread_id, function_id, time)
        elif line_type == 'V':
            # V <thread_id> <event_id> <event_label>
            split_line = line.split(' ',3)
            thread_id = int(split_line[1])
            event_id = int(split_line[2])
            event_label = split_line[3]
            self._consumer.on_event(thread_id, event_id, event_label)
        elif line_type == 'Y':
            # Y <thread_id> <event_id> <time> - event emitted
            split_line = line.split(' ',3)
            thread_id = int(split_line[1])
            event_id = int(split_line[2])
            time = int(split_line[3])
            self._consumer.on_event_emit(thread_id, event_id, time)
        elif line_type == 'C':
            # C <counter_id> <counter_label>
            split_line = line.split(' ',2)
            counter_id = int(split_line[1])
            counter_label = split_line[2]
            self._consumer.on_counter(counter_id, counter_label)
        elif line_type == 'D':
            # D <counter_id> <time> <counter_value>
            split_line = line.split(' ',3)
            counter_id = int(split_line[1])
            time = int(split_line[2])
            counter_value = int(split_line[3])
            self._consumer.on_counter_value(counter_id, time, counter_value)
| 25.931818 | 69 | 0.667835 | 2,281 | 0.999562 | 0 | 0 | 0 | 0 | 0 | 0 | 129 | 0.056529 |
5baff64f81ba252289a1e5fc1113a99049bc4207 | 1,372 | py | Python | moog/tasks/composite_task.py | juanpablordz/moog.github.io | d7995d3563492378d0877ce8d16f5ca9a8031794 | [
"Apache-2.0",
"MIT"
] | 22 | 2021-02-26T18:19:35.000Z | 2022-03-05T19:01:00.000Z | moog/tasks/composite_task.py | juanpablordz/moog.github.io | d7995d3563492378d0877ce8d16f5ca9a8031794 | [
"Apache-2.0",
"MIT"
] | 1 | 2021-04-01T06:15:02.000Z | 2021-04-23T13:14:12.000Z | moog/tasks/composite_task.py | juanpablordz/moog.github.io | d7995d3563492378d0877ce8d16f5ca9a8031794 | [
"Apache-2.0",
"MIT"
] | 2 | 2021-05-02T02:20:39.000Z | 2021-05-06T16:24:35.000Z | """Composite task."""
from . import abstract_task
import numpy as np
class CompositeTask(abstract_task.AbstractTask):
    """CompositeTask task.

    This combines multiple tasks at once, summing the rewards from each of them.
    This can be useful for example to have a predator/prey task where there are
    positive rewards for catching the prey and negative rewards for being caught
    by the predators.
    """

    def __init__(self, *tasks, timeout_steps=np.inf):
        """Constructor.

        Args:
            tasks: Tasks to compose. Reward will be the sum of the rewards from
                each of these tasks.
            timeout_steps: After this number of steps since reset, a reset is
                forced.
        """
        self._tasks = tasks
        self._timeout_steps = timeout_steps

    def reset(self, state, meta_state):
        """Forward the reset to every composed task."""
        for sub_task in self._tasks:
            sub_task.reset(state, meta_state)

    def reward(self, state, meta_state, step_count):
        """Compute reward.

        Returns:
            Tuple (total_reward, should_reset): total_reward sums the
            sub-task rewards; should_reset is True when the timeout has
            elapsed or any sub-task requests a reset.
        """
        total_reward = 0
        should_reset = step_count >= self._timeout_steps
        for sub_task in self._tasks:
            sub_reward, sub_should_reset = sub_task.reward(
                state, meta_state, step_count)
            total_reward += sub_reward
            should_reset = should_reset or sub_should_reset
        return total_reward, should_reset
| 31.906977 | 80 | 0.637026 | 1,299 | 0.946793 | 0 | 0 | 0 | 0 | 0 | 0 | 598 | 0.43586 |
5bb15a5ffb620445f0397de1c06daebdacb8541a | 908 | py | Python | main/api/fields.py | lipis/gae-init-magic | 6b1e0b50f8e5200cb2dacebca9ac65e796b241a9 | [
"MIT"
] | 465 | 2015-01-01T17:49:09.000Z | 2021-12-06T15:00:40.000Z | main/api/fields.py | lipis/gae-init-magic | 6b1e0b50f8e5200cb2dacebca9ac65e796b241a9 | [
"MIT"
] | 652 | 2018-10-26T12:28:08.000Z | 2021-08-02T09:13:48.000Z | main/api/fields.py | lipis/gae-init-magic | 6b1e0b50f8e5200cb2dacebca9ac65e796b241a9 | [
"MIT"
] | 171 | 2015-01-01T16:48:09.000Z | 2022-03-15T21:48:52.000Z | # coding: utf-8
import urllib
from flask_restful import fields
from flask_restful.fields import *
class BlobKey(fields.Raw):
    # URL-quote the blob key's string form (Python 2 urllib.quote).
    def format(self, value):
        return urllib.quote(str(value))
class Blob(fields.Raw):
    # Represent raw blob contents by their repr() string.
    def format(self, value):
        return repr(value)
class DateTime(fields.DateTime):
    # Emit datetimes in ISO 8601 form via isoformat().
    def format(self, value):
        return value.isoformat()
class GeoPt(fields.Raw):
    """Render a geographic point as the string 'lat,lon'."""
    def format(self, value):
        lat, lon = value.lat, value.lon
        return '%s,%s' % (lat, lon)
class Id(fields.Raw):
    # Extracts the numeric id from the object's key.
    def output(self, key, obj):
        try:
            # getattr's None default makes a missing 'key' attribute raise
            # AttributeError on .id(), which is deliberately caught below.
            value = getattr(obj, 'key', None).id()
            return super(Id, self).output(key, {'id': value})
        except AttributeError:
            return None
class Integer(fields.Integer):
    """Pass integers through, stringifying values beyond 2**53.

    Presumably this avoids precision loss in JavaScript clients, whose
    numbers are IEEE doubles — TODO confirm.
    """
    def format(self, value):
        if -9007199254740992 <= value <= 9007199254740992:
            return value
        return str(value)
class Key(fields.Raw):
    # Serialise a key object via its urlsafe() representation.
    def format(self, value):
        return value.urlsafe()
| 18.916667 | 61 | 0.67511 | 787 | 0.86674 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.034141 |
5bb27bd7739504eb9fc2a3cbb5ae875a2a149f7e | 1,353 | py | Python | polls/views.py | pygabo/omnik | 579b20671515d8a38b56df8c5bc837bd201ec7b0 | [
"MIT"
] | null | null | null | polls/views.py | pygabo/omnik | 579b20671515d8a38b56df8c5bc837bd201ec7b0 | [
"MIT"
] | null | null | null | polls/views.py | pygabo/omnik | 579b20671515d8a38b56df8c5bc837bd201ec7b0 | [
"MIT"
] | null | null | null | from django.shortcuts import get_object_or_404, render
from django.db.models import F
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.views.generic import ListView, DetailView

from .models import Choice, Poll
class IndexView(ListView):
    # Name under which the queryset is exposed to the template context.
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        return Poll.objects.order_by('-pub_date')[:5]
class DetailView(DetailView):
    # NOTE(review): this class shadows the generic DetailView imported above.
    # The base resolves to django.views.generic.DetailView at class-creation
    # time, but ResultsView below then inherits *this* class; a distinct name
    # (e.g. PollDetailView) would be clearer.
    model = Poll
class ResultsView(DetailView):
    # Results page for a poll; inherits the local DetailView defined above.
    model = Poll
def vote(request, question_id):
    """Record a vote for one of the question's choices and redirect to results.

    Re-renders the detail page with an error message when no choice was
    submitted or the submitted choice does not belong to the question.
    """
    question = get_object_or_404(Poll, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        # Use an F() expression so the increment happens in the database,
        # avoiding the read-modify-write race of `votes += 1`.
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| 33 | 82 | 0.704361 | 307 | 0.226903 | 0 | 0 | 0 | 0 | 0 | 0 | 372 | 0.274945 |
5bb28b111d97dd9185bbbe21a689e7be67fc0820 | 864 | py | Python | tests/test_contextmanager.py | eagleshine/invirtualenv | 5f8993dd96070f4c7fc88657c59f56f6706a5544 | [
"BSD-3-Clause"
] | 15 | 2017-03-13T03:35:15.000Z | 2021-08-31T09:34:14.000Z | tests/test_contextmanager.py | eagleshine/invirtualenv | 5f8993dd96070f4c7fc88657c59f56f6706a5544 | [
"BSD-3-Clause"
] | 53 | 2016-04-28T20:49:01.000Z | 2021-06-18T16:40:00.000Z | tests/test_contextmanager.py | eagleshine/invirtualenv | 5f8993dd96070f4c7fc88657c59f56f6706a5544 | [
"BSD-3-Clause"
] | 10 | 2016-05-10T19:22:18.000Z | 2020-06-30T18:24:58.000Z | import os
import unittest
import invirtualenv.contextmanager
class TestContextmanager(unittest.TestCase):
    """Tests for invirtualenv.contextmanager helpers."""

    @staticmethod
    def _read(path):
        # Read a file and close the handle. The original used bare
        # open(...).read(), leaking file objects until garbage collection.
        with open(path) as fh:
            return fh.read()

    def test__revert_file(self):
        """revert_file restores the original file contents on exit."""
        with invirtualenv.contextmanager.InTemporaryDirectory():
            with open('testfile', 'w') as fh:
                fh.write('original')
            self.assertEqual('original', self._read('testfile'))
            with invirtualenv.contextmanager.revert_file('testfile'):
                with open('testfile', 'w') as fh:
                    fh.write('changed')
                self.assertEqual('changed', self._read('testfile'))
            self.assertEqual('original', self._read('testfile'))

    def test__InTemporaryDir(self):
        """InTemporaryDirectory yields the path of an existing directory."""
        with invirtualenv.contextmanager.InTemporaryDirectory() as tempdir:
            self.assertIsInstance(tempdir, str)
            self.assertTrue(os.path.exists(tempdir))
| 39.272727 | 75 | 0.638889 | 800 | 0.925926 | 0 | 0 | 0 | 0 | 0 | 0 | 114 | 0.131944 |
5bb59ff29485da299bf3f36eb02b24fa0c1f1983 | 6,731 | py | Python | xero/filesmanager.py | Ian2020/pyxero | 187f00fc535c585fa741520d1bdd9500abf3dcca | [
"BSD-3-Clause"
] | 246 | 2015-02-11T19:38:57.000Z | 2022-03-29T10:13:52.000Z | xero/filesmanager.py | Ian2020/pyxero | 187f00fc535c585fa741520d1bdd9500abf3dcca | [
"BSD-3-Clause"
] | 209 | 2015-01-07T01:28:12.000Z | 2022-03-09T19:06:40.000Z | xero/filesmanager.py | Ian2020/pyxero | 187f00fc535c585fa741520d1bdd9500abf3dcca | [
"BSD-3-Clause"
] | 206 | 2015-01-10T22:33:08.000Z | 2022-03-16T14:07:28.000Z | from __future__ import unicode_literals
import os
import requests
from six.moves.urllib.parse import parse_qs
from .constants import XERO_FILES_URL
from .exceptions import (
XeroBadRequest,
XeroExceptionUnknown,
XeroForbidden,
XeroInternalError,
XeroNotAvailable,
XeroNotFound,
XeroNotImplemented,
XeroRateLimitExceeded,
XeroUnauthorized,
XeroUnsupportedMediaType,
)
class FilesManager(object):
    """Thin HTTP client for the Xero Files API.

    At construction time, every name in ``DECORATED_METHODS`` is bound on the
    instance as a public method: the matching private ``_<name>`` builder
    (which only assembles request parameters) is wrapped by ``_get_data``,
    which performs the HTTP call and maps error status codes to exceptions.
    """

    # Public method names generated in __init__ from their ``_``-prefixed builders.
    DECORATED_METHODS = (
        "get",
        "all",
        "create",
        "save",
        "delete",
        "get_files",
        "upload_file",
        "get_association",
        "get_associations",
        "make_association",
        "delete_association",
        "get_content",
    )

    def __init__(self, name, credentials):
        # name: API resource name used in URLs (e.g. 'Files' or 'Folders').
        # credentials: object providing ``base_url`` and an ``oauth`` auth handler.
        self.credentials = credentials
        self.name = name
        self.base_url = credentials.base_url + XERO_FILES_URL

        # Bind each public method as the wrapped version of its private builder.
        for method_name in self.DECORATED_METHODS:
            method = getattr(self, "_%s" % method_name)
            setattr(self, method_name, self._get_data(method))

    def _get_results(self, data):
        # Extract the interesting payload from a parsed API response dict.
        # Returns a list/tuple of results, a single result, or None.
        response = data["Response"]
        if self.name in response:
            result = response[self.name]
        elif "Attachments" in response:
            result = response["Attachments"]
        else:
            return None

        if isinstance(result, tuple) or isinstance(result, list):
            return result

        # NOTE(review): ``self.singular`` is never assigned in this class —
        # presumably provided by a subclass or patched externally; if not,
        # this line raises AttributeError.  Falls through to an implicit None.
        if isinstance(result, dict) and self.singular in result:
            return result[self.singular]

    def _get_data(self, func):
        """ This is the decorator for our DECORATED_METHODS.
        Each of the decorated methods must return:
            uri, params, method, body, headers, singleobject
        """

        def wrapper(*args, **kwargs):
            # Builders actually return a 7-tuple (adds ``files`` for uploads).
            uri, params, method, body, headers, singleobject, files = func(
                *args, **kwargs
            )
            # Dispatch to requests.get/post/put/delete by name.
            response = getattr(requests, method)(
                uri,
                data=body,
                headers=headers,
                auth=self.credentials.oauth,
                params=params,
                files=files,
            )

            if response.status_code == 200 or response.status_code == 201:
                if response.headers["content-type"].startswith("application/json"):
                    return response.json()
                else:
                    # return a byte string without doing any Unicode conversions
                    return response.content

            # Delete will return a response code of 204 - No Content
            elif response.status_code == 204:
                return "Deleted"

            elif response.status_code == 400:
                raise XeroBadRequest(response)

            elif response.status_code == 401:
                raise XeroUnauthorized(response)

            elif response.status_code == 403:
                raise XeroForbidden(response)

            elif response.status_code == 404:
                raise XeroNotFound(response)

            elif response.status_code == 415:
                raise XeroUnsupportedMediaType(response)

            elif response.status_code == 500:
                raise XeroInternalError(response)

            elif response.status_code == 501:
                raise XeroNotImplemented(response)

            elif response.status_code == 503:
                # Two 503 responses are possible. Rate limit errors
                # return encoded content; offline errors don't.
                # If you parse the response text and there's nothing
                # encoded, it must be a not-available error.
                payload = parse_qs(response.text)
                if payload:
                    raise XeroRateLimitExceeded(response, payload)
                else:
                    raise XeroNotAvailable(response)
            else:
                raise XeroExceptionUnknown(response)

        return wrapper

    def _get(self, id, headers=None):
        # Fetch a single resource by id.
        uri = "/".join([self.base_url, self.name, id])
        return uri, {}, "get", None, headers, True, None

    def _get_files(self, folderId):
        """Retrieve the list of files contained in a folder"""
        uri = "/".join([self.base_url, self.name, folderId, "Files"])
        return uri, {}, "get", None, None, False, None

    def _get_associations(self, id):
        # List all associations of a file.
        uri = "/".join([self.base_url, self.name, id, "Associations"]) + "/"
        return uri, {}, "get", None, None, False, None

    def _get_association(self, fileId, objectId):
        # Fetch a single file/object association.
        uri = "/".join([self.base_url, self.name, fileId, "Associations", objectId])
        return uri, {}, "get", None, None, False, None

    def _delete_association(self, fileId, objectId):
        # Remove a file/object association.
        uri = "/".join([self.base_url, self.name, fileId, "Associations", objectId])
        return uri, {}, "delete", None, None, False, None

    def create_or_save(self, data, method="post", headers=None, summarize_errors=True):
        # Shared builder behind _create (POST, no Id) and _save (PUT, with Id).
        # Note: public name, but not in DECORATED_METHODS, so it is not wrapped.
        if "Id" not in data:
            uri = "/".join([self.base_url, self.name])
        else:
            uri = "/".join([self.base_url, self.name, data["Id"]])
        body = data
        if summarize_errors:
            params = {}
        else:
            params = {"summarizeErrors": "false"}
        return uri, params, method, body, headers, False, None

    def _create(self, data):
        # Create a new resource (POST).
        return self.create_or_save(data, method="post")

    def _save(self, data, summarize_errors=True):
        # Update an existing resource (PUT).
        return self.create_or_save(
            data, method="put", summarize_errors=summarize_errors
        )

    def _delete(self, id):
        # Delete a resource by id.
        uri = "/".join([self.base_url, self.name, id])
        return uri, {}, "delete", None, None, False, None

    def _upload_file(self, path, folderId=None):
        # Upload a local file, optionally into a specific folder.
        # NOTE(review): the opened handle is passed to requests and never
        # explicitly closed here.
        if folderId is not None:
            uri = "/".join([self.base_url, self.name, folderId])
        else:
            uri = "/".join([self.base_url, self.name])

        filename = self.filename(path)
        files = dict()
        files[filename] = open(path, mode="rb")

        return uri, {}, "post", None, None, False, files

    def _get_content(self, fileId):
        # Download the raw content of a file (returned as bytes by the wrapper).
        uri = "/".join([self.base_url, self.name, fileId, "Content"])
        return uri, {}, "get", None, None, False, None

    def _make_association(self, id, data):
        # Create an association between a file and another Xero object.
        uri = "/".join([self.base_url, self.name, id, "Associations"])
        body = data
        return uri, {}, "post", body, None, False, None

    def _all(self):
        # List every resource of this type.
        uri = "/".join([self.base_url, self.name])
        return uri, {}, "get", None, None, False, None

    def filename(self, path):
        # Return the final path component (handles trailing separators).
        head, tail = os.path.split(path)
        return tail or os.path.basename(head)
| 33.321782 | 87 | 0.576437 | 6,319 | 0.938791 | 0 | 0 | 0 | 0 | 0 | 0 | 989 | 0.146932 |
5bb69ffabb9285aa28e934c2aba594cdec056ae0 | 519 | py | Python | jobs/migrations/0055_savedfeatureselection_uid.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 95 | 2017-09-29T13:20:38.000Z | 2022-03-14T06:43:47.000Z | jobs/migrations/0055_savedfeatureselection_uid.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 229 | 2015-07-29T08:50:27.000Z | 2017-09-21T18:05:56.000Z | jobs/migrations/0055_savedfeatureselection_uid.py | hotosm/hot-exports-two | d60530445e89b2a46bd55ea3b7c2e72409b0f493 | [
"BSD-3-Clause"
] | 30 | 2017-10-06T23:53:48.000Z | 2022-03-10T06:17:07.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-06-26 12:06
from __future__ import unicode_literals
from django.db import migrations, models
import uuid
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds a unique, indexed UUID column
    # to SavedFeatureSelection (defaulting to uuid4, hidden from forms).
    # Do not edit the operations by hand — generate a follow-up migration instead.

    dependencies = [
        ('jobs', '0054_savedfeatureselection'),
    ]

    operations = [
        migrations.AddField(
            model_name='savedfeatureselection',
            name='uid',
            field=models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True),
        ),
    ]
| 23.590909 | 99 | 0.645472 | 352 | 0.678227 | 0 | 0 | 0 | 0 | 0 | 0 | 130 | 0.250482 |
5bb783371c1c6bac129e190c83ca0c821e863a44 | 6,194 | py | Python | unified_planning/plans/partial_order_plan.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | [
"Apache-2.0"
] | 9 | 2022-02-18T14:51:58.000Z | 2022-03-31T06:02:43.000Z | unified_planning/plans/partial_order_plan.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | [
"Apache-2.0"
] | 37 | 2022-02-01T10:44:38.000Z | 2022-03-31T09:13:42.000Z | unified_planning/plans/partial_order_plan.py | aiplan4eu/unified-planning | d2fd18baa3a2110595e5dfdc3f55254df72c3016 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 AIPlan4EU project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import networkx as nx # type: ignore
import unified_planning as up
import unified_planning.plans as plans
from unified_planning.environment import Environment
from unified_planning.exceptions import UPUsageError
from unified_planning.plans.plan import ActionInstance
from unified_planning.plans.sequential_plan import SequentialPlan
from typing import Callable, Dict, Iterator, List, Optional
class PartialOrderPlan(plans.plan.Plan):
    '''Represents a partial order plan. Actions are represented as an adjacency list graph.'''
    def __init__(self,
                adjacency_list: Dict['plans.plan.ActionInstance', List['plans.plan.ActionInstance']],
                environment: Optional['Environment'] = None,
                _graph: Optional[nx.DiGraph] = None
                ):
        '''Constructs the PartialOrderPlan using the adjacency list representation.

        :param adjacency_list: The Dictionary representing the adjacency list for this PartialOrderPlan.
        :param env: The environment in which the ActionInstances in the adjacency_list are created.
        :param _graph: The graph that is semantically equivalent to the adjacency_list.
            NOTE: This parameter is for internal use only and its maintenance is not guaranteed by any means.
        :return: The created PartialOrderPlan.
        '''
        # if we have a specific env or we don't have any actions
        if environment is not None or not adjacency_list:
            plans.plan.Plan.__init__(self, plans.plan.PlanKind.PARTIAL_ORDER_PLAN, environment)
        # If we don't have a specific env, use the env of the first action
        else:
            assert len(adjacency_list) > 0
            for ai in adjacency_list.keys():
                plans.plan.Plan.__init__(self, plans.plan.PlanKind.PARTIAL_ORDER_PLAN, ai.action.env)
                break
        if _graph is not None:
            # sanity checks: the internal graph is only accepted instead of,
            # never in addition to, an adjacency list
            assert len(adjacency_list) == 0
            assert all(isinstance(n, ActionInstance) for n in _graph.nodes)
            assert all(isinstance(f, ActionInstance) and isinstance(t, ActionInstance) \
                for f, t in _graph.edges)
            self._graph = _graph
        else:
            for ai_k, ai_v_list in adjacency_list.items(): # check that given env and the env in the actions is the same
                if ai_k.action.env != self._environment:
                    raise UPUsageError('The environment given to the plan is not the same of the actions in the plan.')
                for ai in ai_v_list:
                    if ai.action.env != self._environment:
                        raise UPUsageError('The environment given to the plan is not the same of the actions in the plan.')
            self._graph = nx.convert.from_dict_of_lists(adjacency_list, create_using=nx.DiGraph)

    def __repr__(self) -> str:
        # Delegates to networkx's graph repr.
        return str(self._graph )

    def __eq__(self, oth: object) -> bool:
        # Two plans are equal when their graphs are isomorphic with
        # semantically-equivalent action instances as matching nodes.
        if isinstance(oth, PartialOrderPlan):
            return nx.is_isomorphic(self._graph, oth._graph, node_match=_semantically_equivalent_action_instances)
        else:
            return False

    def __hash__(self) -> int:
        # Graph-structure hash; consistent with the isomorphism-based __eq__.
        return hash(nx.weisfeiler_lehman_graph_hash(self._graph))

    def __contains__(self, item: object) -> bool:
        # Membership by semantic equivalence, not object identity.
        if isinstance(item, ActionInstance):
            return any(item.is_semantically_equivalent(a) for a in self._graph.nodes)
        else:
            return False

    @property
    def get_adjacency_list(self) -> Dict['plans.plan.ActionInstance', List['plans.plan.ActionInstance']]:
        '''Returns the graph of action instances as an adjacency list.'''
        return nx.convert.to_dict_of_lists(self._graph)

    def replace_action_instances(self, replace_function: Callable[['plans.plan.ActionInstance'], 'plans.plan.ActionInstance']) -> 'plans.plan.Plan':
        '''Returns a new PartialOrderPlan where every ActionInstance is mapped through replace_function.'''
        new_adj_list: Dict['plans.plan.ActionInstance', List['plans.plan.ActionInstance']] = {}
        # Populate the new adjacency list with the replaced action instances
        for node in self._graph.nodes:
            new_adj_list[replace_function(node)] = [replace_function(successor) for successor in self._graph.neighbors(node)]
        new_env = self._environment
        # Use the environment of the first replaced action, if any.
        for ai in new_adj_list.keys():
            new_env = ai.action.env
            break
        return PartialOrderPlan(new_adj_list, new_env)

    def to_sequential_plan(self) -> SequentialPlan:
        '''Returns one between all possible SequentialPlans that respects the ordering constraints given by this PartialOrderPlan.'''
        return SequentialPlan(list(nx.topological_sort(self._graph)), self._environment)

    def all_sequential_plans(self) -> Iterator[SequentialPlan]:
        '''Returns all possible SequentialPlans that respect the ordering constraints given by this PartialOrderPlan.'''
        for sorted_plan in nx.all_topological_sorts(self._graph):
            yield SequentialPlan(list(sorted_plan), self._environment)

    def get_neighbors(self, action_instance: ActionInstance) -> Iterator[ActionInstance]:
        '''Returns the successors of the given ActionInstance in the plan graph; the instance must be the exact node object.'''
        try:
            retval = self._graph.neighbors(action_instance)
        except nx.NetworkXError:
            raise UPUsageError(f'The action instance {str(action_instance)} does not belong to this Partial Order Plan. \n Note that 2 Action Instances are equals if and only if they are the exact same object.')
        return retval
def _semantically_equivalent_action_instances(first: ActionInstance, second: ActionInstance) -> bool:
    """networkx ``node_match`` callback: two plan nodes match when their
    ActionInstances are semantically equivalent."""
    return first.is_semantically_equivalent(second)
| 51.616667 | 211 | 0.700355 | 5,011 | 0.809009 | 317 | 0.051179 | 245 | 0.039554 | 0 | 0 | 2,361 | 0.381175 |
5bb8dc2b9919fe922dd397b3d18c6984fcf2da3a | 71 | py | Python | tests/params.py | cponecp/iHone | 91096b085d72afa97a6e640c082564ef500f5be8 | [
"MIT"
] | null | null | null | tests/params.py | cponecp/iHone | 91096b085d72afa97a6e640c082564ef500f5be8 | [
"MIT"
] | null | null | null | tests/params.py | cponecp/iHone | 91096b085d72afa97a6e640c082564ef500f5be8 | [
"MIT"
] | null | null | null | def fun(default=None,**kwargs):
print(1)
fun(user='cp',default={}) | 17.75 | 31 | 0.633803 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4 | 0.056338 |
5bb90766f7b85944500acf3f02a74757b861fc3d | 8,492 | py | Python | orquesta/tests/unit/graphing/native/test_routes_split.py | batk0/orquesta | f03f3f2f3820bf111a9277f4f6c5d6c83a89d004 | [
"Apache-2.0"
] | null | null | null | orquesta/tests/unit/graphing/native/test_routes_split.py | batk0/orquesta | f03f3f2f3820bf111a9277f4f6c5d6c83a89d004 | [
"Apache-2.0"
] | null | null | null | orquesta/tests/unit/graphing/native/test_routes_split.py | batk0/orquesta | f03f3f2f3820bf111a9277f4f6c5d6c83a89d004 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from orquesta.tests.unit.composition.native import base
class SplitWorkflowRoutesTest(base.OrchestraWorkflowComposerTest):
    """Route-expansion tests for workflows containing split tasks.

    Each test loads a named workflow fixture and asserts the expected set of
    execution routes.  A route is a dict with:
      * 'tasks': the task names on the route; split tasks are suffixed
        ``__<n>`` to identify the branch instance;
      * 'path':  the directed transitions ``(from_task, to_task, criteria_index)``.
    """

    def test_split(self):
        """A single split task produces one route per incoming branch."""
        wf_name = 'split'

        expected_routes = [
            {
                'tasks': [
                    'task1',
                    'task2',
                    'task4__1',
                    'task5__1',
                    'task6__1',
                    'task7__1',
                ],
                'path': [
                    ('task1', 'task2', 0),
                    ('task2', 'task4__1', 0),
                    ('task4__1', 'task5__1', 0),
                    ('task4__1', 'task6__1', 0),
                    ('task5__1', 'task7__1', 0),
                    ('task6__1', 'task7__1', 0),
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task3',
                    'task4__2',
                    'task5__2',
                    'task6__2',
                    'task7__2'
                ],
                'path': [
                    ('task1', 'task3', 0),
                    ('task3', 'task4__2', 0),
                    ('task4__2', 'task5__2', 0),
                    ('task4__2', 'task6__2', 0),
                    ('task5__2', 'task7__2', 0),
                    ('task6__2', 'task7__2', 0)
                ]
            }
        ]

        self.assert_wf_ex_routes(wf_name, expected_routes)

    def test_splits(self):
        """Multiple splits: the shortcut route plus one route per branch."""
        wf_name = 'splits'

        expected_routes = [
            {
                'tasks': [
                    'task1',
                    'task8__1'
                ],
                'path': [
                    ('task1', 'task8__1', 0)
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task2',
                    'task4__1',
                    'task5__1',
                    'task6__1',
                    'task7__1',
                    'task8__2'
                ],
                'path': [
                    ('task1', 'task2', 0),
                    ('task2', 'task4__1', 0),
                    ('task4__1', 'task5__1', 0),
                    ('task4__1', 'task6__1', 0),
                    ('task5__1', 'task7__1', 0),
                    ('task6__1', 'task7__1', 0),
                    ('task7__1', 'task8__2', 0)
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task3',
                    'task4__2',
                    'task5__2',
                    'task6__2',
                    'task7__2',
                    'task8__3'
                ],
                'path': [
                    ('task1', 'task3', 0),
                    ('task3', 'task4__2', 0),
                    ('task4__2', 'task5__2', 0),
                    ('task4__2', 'task6__2', 0),
                    ('task5__2', 'task7__2', 0),
                    ('task6__2', 'task7__2', 0),
                    ('task7__2', 'task8__3', 0)
                ]
            }
        ]

        self.assert_wf_ex_routes(wf_name, expected_routes)

    def test_nested_splits(self):
        """Nested splits multiply: two outer x two inner branches = 4 routes."""
        wf_name = 'splits-nested'

        expected_routes = [
            {
                'tasks': [
                    'task1',
                    'task10__1',
                    'task2',
                    'task4__1',
                    'task5__1',
                    'task7__1',
                    'task8__1',
                    'task9__1'
                ],
                'path': [
                    ('task1', 'task2', 0),
                    ('task2', 'task4__1', 0),
                    ('task4__1', 'task5__1', 0),
                    ('task5__1', 'task7__1', 0),
                    ('task7__1', 'task8__1', 0),
                    ('task7__1', 'task9__1', 0),
                    ('task8__1', 'task10__1', 0),
                    ('task9__1', 'task10__1', 0)
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task10__2',
                    'task2',
                    'task4__1',
                    'task6__1',
                    'task7__2',
                    'task8__2',
                    'task9__2'
                ],
                'path': [
                    ('task1', 'task2', 0),
                    ('task2', 'task4__1', 0),
                    ('task4__1', 'task6__1', 0),
                    ('task6__1', 'task7__2', 0),
                    ('task7__2', 'task8__2', 0),
                    ('task7__2', 'task9__2', 0),
                    ('task8__2', 'task10__2', 0),
                    ('task9__2', 'task10__2', 0)
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task10__3',
                    'task3',
                    'task4__2',
                    'task5__2',
                    'task7__3',
                    'task8__3',
                    'task9__3'
                ],
                'path': [
                    ('task1', 'task3', 0),
                    ('task3', 'task4__2', 0),
                    ('task4__2', 'task5__2', 0),
                    ('task5__2', 'task7__3', 0),
                    ('task7__3', 'task8__3', 0),
                    ('task7__3', 'task9__3', 0),
                    ('task8__3', 'task10__3', 0),
                    ('task9__3', 'task10__3', 0)
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task10__4',
                    'task3',
                    'task4__2',
                    'task6__2',
                    'task7__4',
                    'task8__4',
                    'task9__4'
                ],
                'path': [
                    ('task1', 'task3', 0),
                    ('task3', 'task4__2', 0),
                    ('task4__2', 'task6__2', 0),
                    ('task6__2', 'task7__4', 0),
                    ('task7__4', 'task8__4', 0),
                    ('task7__4', 'task9__4', 0),
                    ('task8__4', 'task10__4', 0),
                    ('task9__4', 'task10__4', 0)
                ]
            }
        ]

        self.assert_wf_ex_routes(wf_name, expected_routes)

    def test_splits_extra_join(self):
        """A split whose final task is also joined directly from task1."""
        wf_name = 'splits-join'

        expected_routes = [
            {
                'tasks': [
                    'task1',
                    'task2',
                    'task4__1',
                    'task5__1',
                    'task6__1',
                    'task7__1',
                    'task8__1'
                ],
                'path': [
                    ('task1', 'task2', 0),
                    ('task1', 'task8__1', 0),
                    ('task2', 'task4__1', 0),
                    ('task4__1', 'task5__1', 0),
                    ('task4__1', 'task6__1', 0),
                    ('task5__1', 'task7__1', 0),
                    ('task6__1', 'task7__1', 0),
                    ('task7__1', 'task8__1', 0)
                ]
            },
            {
                'tasks': [
                    'task1',
                    'task3',
                    'task4__2',
                    'task5__2',
                    'task6__2',
                    'task7__2',
                    'task8__2'
                ],
                'path': [
                    ('task1', 'task3', 0),
                    ('task1', 'task8__2', 0),
                    ('task3', 'task4__2', 0),
                    ('task4__2', 'task5__2', 0),
                    ('task4__2', 'task6__2', 0),
                    ('task5__2', 'task7__2', 0),
                    ('task6__2', 'task7__2', 0),
                    ('task7__2', 'task8__2', 0)
                ]
            }
        ]

        self.assert_wf_ex_routes(wf_name, expected_routes)
| 32.166667 | 74 | 0.321597 | 7,888 | 0.928874 | 0 | 0 | 0 | 0 | 0 | 0 | 2,809 | 0.330782 |
5bbbb9f34d714ae4dbca7523d97576b3cc0761f0 | 227 | py | Python | test_client.py | lilydjwg/udt_py | 90cb36f3ef503bb45a9aa4f9dad5c86eddce0abd | [
"BSD-3-Clause"
] | 9 | 2015-01-11T06:59:40.000Z | 2022-02-02T14:57:59.000Z | test_client.py | lilydjwg/udt_py | 90cb36f3ef503bb45a9aa4f9dad5c86eddce0abd | [
"BSD-3-Clause"
] | 1 | 2015-01-15T21:03:51.000Z | 2015-01-16T03:05:25.000Z | test_client.py | lilydjwg/udt_py | 90cb36f3ef503bb45a9aa4f9dad5c86eddce0abd | [
"BSD-3-Clause"
] | 1 | 2020-06-20T08:39:06.000Z | 2020-06-20T08:39:06.000Z | #!/usr/bin/env python3
import udt
import socket
import time
# Connect a UDT socket (stream semantics over IPv4) to the local test server,
# send one message and echo the raw reply.
sock = udt.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
sock.connect(("localhost", 5555))

print("Sending...")
sock.send(b"Hello", 0)

reply = sock.recv(1024, 0)
print(repr(reply))
| 16.214286 | 53 | 0.696035 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 53 | 0.23348 |
5bbf42ace722c24c81f330e4307be610dd8dbc8b | 1,907 | py | Python | core_lib/rule_validator/rule_validator_decorator.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
] | null | null | null | core_lib/rule_validator/rule_validator_decorator.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
] | 9 | 2021-03-11T02:29:17.000Z | 2022-03-22T19:01:18.000Z | core_lib/rule_validator/rule_validator_decorator.py | shubham-surya/core-lib | 543db80706746a937e5ed16bd50f2de8d58b32e4 | [
"MIT"
] | 2 | 2022-01-27T11:19:00.000Z | 2022-02-11T11:33:09.000Z | from functools import wraps
from core_lib.helpers.func_utils import get_func_parameter_index_by_name
from core_lib.rule_validator.rule_validator import RuleValidator
class ParameterRuleValidator(object):
    """Decorator that validates one ``dict`` argument of the wrapped function
    with a ``RuleValidator`` before forwarding the call.

    The named positional parameter is replaced by the validated dict returned
    from ``RuleValidator.validate_dict``.
    """

    def __init__(self,
                 rule_validator: RuleValidator,
                 parameter_name: str,
                 strict_mode: bool = None,
                 mandatory_keys: list = None,
                 prohibited_keys: list = None):
        if not parameter_name:
            raise ValueError("ParameterRuleValidator: parameter_name missing")
        if not rule_validator:
            raise ValueError("ParameterRuleValidator: rule_validator missing")

        self.parameter_name = parameter_name
        self.rule_validator = rule_validator
        self.strict_mode = strict_mode
        self.mandatory_keys = mandatory_keys
        self.prohibited_keys = prohibited_keys

    def __call__(self, func):
        @wraps(func)
        def __wrapper(*args, **kwargs):
            # Locate the positional slot of the watched parameter.
            idx = get_func_parameter_index_by_name(func, self.parameter_name)
            candidate = args[idx]
            if not isinstance(candidate, dict):
                raise ValueError("`ParameterRuleValidator`. function `{}`, parameter `{}`. apply only when updating the database with `dict` parameters ".format(func.__name__, self.parameter_name))

            validated = self.rule_validator.validate_dict(candidate,
                                                          strict_mode=self.strict_mode,
                                                          mandatory_keys=self.mandatory_keys,
                                                          prohibited_keys=self.prohibited_keys)

            # Forward the call with the validated dict swapped in.
            patched = list(args)
            patched[idx] = validated
            return func(*tuple(patched), **kwargs)

        return __wrapper
| 38.918367 | 197 | 0.616675 | 1,737 | 0.910855 | 0 | 0 | 942 | 0.49397 | 0 | 0 | 216 | 0.113267 |
5bbfa07e6200a39297ee7b945822d94abec3bd05 | 2,058 | py | Python | logistic_regression_08/main.py | michaellengyel/cifar_image_recognition | ad8dc5784e32907762366018270412c8975719d2 | [
"Apache-2.0"
] | null | null | null | logistic_regression_08/main.py | michaellengyel/cifar_image_recognition | ad8dc5784e32907762366018270412c8975719d2 | [
"Apache-2.0"
] | null | null | null | logistic_regression_08/main.py | michaellengyel/cifar_image_recognition | ad8dc5784e32907762366018270412c8975719d2 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
class LogisticRegression(nn.Module):
    """Single-layer logistic regression: linear map to one logit + sigmoid."""

    def __init__(self, n_input_features):
        super().__init__()
        # One output unit: probability of the positive class.
        self.linear = nn.Linear(n_input_features, 1)

    def forward(self, x):
        # Sigmoid squashes the logit into (0, 1).
        return torch.sigmoid(self.linear(x))
def main():
    """Train a logistic-regression classifier on the sklearn breast-cancer set."""
    # 1. Prepare the data
    bc = datasets.load_breast_cancer()
    X, y = bc.data, bc.target

    n_samples, n_features = X.shape

    x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1234)

    # Standardize features (zero mean, unit variance); fit on the train split only.
    sc = StandardScaler()
    x_train = sc.fit_transform(x_train)
    x_test = sc.transform(x_test)

    x_train = torch.from_numpy(x_train.astype(np.float32))
    x_test = torch.from_numpy(x_test.astype(np.float32))
    y_train = torch.from_numpy(y_train.astype(np.float32))
    y_test = torch.from_numpy(y_test.astype(np.float32))

    # BCELoss expects targets shaped (n, 1), matching the model output.
    y_train = y_train.view(y_train.shape[0], 1)
    y_test = y_test.view(y_test.shape[0], 1)

    # 2. Model
    model = LogisticRegression(n_features)

    # 3. Loss and Optimizer
    learning_rate = 0.01
    criterion = nn.BCELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

    # 4. Training Loop
    num_epochs = 100
    for epoch in range(num_epochs):
        # Forward Pass
        y_predicted = model(x_train)
        loss = criterion(y_predicted, y_train)

        # Backward Pass.  BUG FIX: gradients must be reset every iteration;
        # the original loop never called zero_grad(), so gradients from all
        # previous epochs accumulated and corrupted the SGD updates.
        optimizer.zero_grad()
        loss.backward()

        # Optimizer step
        optimizer.step()

        if (epoch + 1) % 10 == 0:
            print(f'epoch: {epoch + 1}, loss = {loss.item():.4f}')

    # Evaluation: no gradient tracking needed.
    with torch.no_grad():
        y_predicted = model(x_test)
        y_predicted_cls = y_predicted.round()
        acc = y_predicted_cls.eq(y_test).sum() / float(y_test.shape[0])
        print(f'accuracy = {acc:.4f}')


if __name__ == '__main__':
    main()
| 26.384615 | 95 | 0.661808 | 289 | 0.140428 | 0 | 0 | 0 | 0 | 0 | 0 | 206 | 0.100097 |
5bbfc3eb5cd5c998c4f1c0a160a1b3c4c4e0bd4e | 1,060 | py | Python | obkey_parts/__version__.py | evyd13/obkey3 | bb49ed6d6696299a410c43d0ba6471ee3f594c26 | [
"MIT"
] | 4 | 2018-12-17T03:53:26.000Z | 2022-01-06T19:40:02.000Z | obkey_parts/__version__.py | evyd13/obkey3 | bb49ed6d6696299a410c43d0ba6471ee3f594c26 | [
"MIT"
] | 9 | 2016-05-01T09:42:23.000Z | 2022-01-10T11:10:12.000Z | obkey_parts/__version__.py | evyd13/obkey3 | bb49ed6d6696299a410c43d0ba6471ee3f594c26 | [
"MIT"
] | 6 | 2019-03-11T13:14:22.000Z | 2022-01-02T23:55:17.000Z | """
Obkey package informations.
This file is a part of Openbox Key Editor
Code under GPL (originally MIT) from version 1.3 - 2018.
See Licenses information in ../obkey .
"""
MAJOR = 1
MINOR = 3
PATCH = 2
__version__ = "{0}.{1}.{2}".format(MAJOR, MINOR, PATCH)
__description__ = 'Openbox Key Editor'
__long_description__ = """
A keybinding editor for OpenBox, it includes launchers and window management keys.
It allows to:
* can check almost all keybinds in one second.
* add new keybinds, the default key associated will be 'a' and no action will be associated;
* add new child keybinds;
* setup existing keybinds :
* add/remove/sort/setup actions in the actions list;
* change the keybind by clicking on the item in the list;
* duplicate existing keybinds;
* remove keybinds.
The current drawbacks :
* XML inculsion is not managed. If you want to edit many files, then you shall open them with `obkey <config file>.xml`;
* `if` conditionnal tag is not supported (but did you knew it exists).
"""
| 32.121212 | 124 | 0.700943 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 941 | 0.887736 |
5bc0cb9ffa299ad9d23aa50d9179c1903fd178ee | 811 | py | Python | PSA/modules/BroEventDispatcher.py | SECURED-FP7/secured-psa-nsm | 20c8f790ebc2d2aa8c33bda1e047f8f29275a0be | [
"Apache-2.0"
] | null | null | null | PSA/modules/BroEventDispatcher.py | SECURED-FP7/secured-psa-nsm | 20c8f790ebc2d2aa8c33bda1e047f8f29275a0be | [
"Apache-2.0"
] | null | null | null | PSA/modules/BroEventDispatcher.py | SECURED-FP7/secured-psa-nsm | 20c8f790ebc2d2aa8c33bda1e047f8f29275a0be | [
"Apache-2.0"
] | null | null | null | # -*- Mode:Python;indent-tabs-mode:nil; -*-
#
# BroEventDispatcher.py
#
# A simple event dispatcher.
#
# Author: jju / VTT Technical Research Centre of Finland Ltd., 2016
#
import logging
# Registry mapping an event key to the callback object (exposing ``onEvent``)
# that handles events for that key.
callbacks = {}


def init():
    """No-op initializer kept for API compatibility."""
    pass


def register(key, obj):
    """
    Register a callback for key 'key'
    """
    callbacks[key] = obj


def unregister(key):
    """
    Unregisters callback for key 'key' (raises KeyError if absent)
    """
    del callbacks[key]


def dispatch(key, data):
    """
    Dispatch event 'data' to the callback registered for key 'key'

    Never raises: a missing key and a failing callback are both logged.
    """
    try:
        cb = callbacks[key]
    except KeyError as e:
        # BUG FIX: the original broad ``except Exception`` also swallowed
        # errors raised *inside* the callback and logged them with the
        # misleading 'No dispatcher' message; failure modes are now separated.
        logging.warning('No dispatcher for key: ' + key + ': ' + str(e))
        return
    if cb is not None:  # 'is not None' instead of the non-idiomatic '!= None'
        try:
            cb.onEvent(data)
        except Exception:
            logging.exception('Callback for key %s failed', key)
| 19.309524 | 76 | 0.588163 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 371 | 0.45746 |
5bc10292408a422083d964a15112bb688f34a1cb | 3,592 | py | Python | scripts/molecules.py | abelcarreras/PyQchem | 2edf984ba17373ad3fd450b18592c8b7827b72e5 | [
"MIT"
] | 16 | 2020-03-06T00:15:16.000Z | 2022-02-21T12:54:46.000Z | scripts/molecules.py | abelcarreras/qchem_scripts | 992fc3d650f3b7651c63aba5759ba0f986eccffe | [
"MIT"
] | 3 | 2020-12-24T12:44:14.000Z | 2021-03-30T03:11:16.000Z | scripts/molecules.py | abelcarreras/qchem_scripts | 992fc3d650f3b7651c63aba5759ba0f986eccffe | [
"MIT"
] | 3 | 2020-06-05T20:55:41.000Z | 2021-03-23T18:17:15.000Z | from pyqchem.structure import Structure
import numpy as np
# Ethene dimer in a parallel (face-to-face) arrangement.
def dimer_ethene(distance, slide_y, slide_z):
    """Build an ethene dimer: the second monomer is displaced by *distance*
    along x and slid by *slide_y* / *slide_z* within the molecular plane.

    Returns the Structure plus the analysis options dict.
    """
    monomer = np.array([[0.0000000, 0.0000000, 0.6660120],
                        [0.0000000, 0.0000000, -0.6660120],
                        [0.0000000, 0.9228100, 1.2279200],
                        [0.0000000, -0.9228100, 1.2279200],
                        [0.0000000, -0.9228100, -1.2279200],
                        [0.0000000, 0.9228100, -1.2279200]])

    shifted = monomer.copy()
    shifted[:, 0] += distance
    shifted[:, 1] += slide_y
    shifted[:, 2] += slide_z

    coordinates = np.vstack([monomer, shifted])
    symbols = ['C', 'C', 'H', 'H', 'H', 'H'] * 2

    molecule = Structure(coordinates=coordinates,
                         symbols=symbols,
                         charge=0)

    return molecule, {'state_threshold': 0.2,
                      'n_mon': 6}
# Tetrafluoroethene dimer (the original comment said "Tetracloroethene").
def dimer_tetrafluoroethene(distance, slide_y, slide_z):
    """Build a tetrafluoroethene dimer with the second monomer stacked
    *distance* along z and slid by *slide_y* (y) and *slide_z* (x)."""
    monomer = np.array([[ 0.6624670117,  0.0000000000, 0.0000000000],
                        [-0.6624670117,  0.0000000000, 0.0000000000],
                        [ 1.3834661472,  1.0993897934, 0.0000000000],
                        [ 1.3834661472, -1.0993897934, 0.0000000000],
                        [-1.3834661472, -1.0993897934, 0.0000000000],
                        [-1.3834661472,  1.0993897934, 0.0000000000]])
    symbols = ['C', 'C', 'F', 'F', 'F', 'F']

    displaced = monomer.copy()
    displaced[:, 2] += distance
    displaced[:, 1] += slide_y
    displaced[:, 0] += slide_z

    coordinates = np.vstack([monomer, displaced])

    molecule = Structure(coordinates=coordinates,
                         symbols=symbols * 2,
                         charge=0)

    return molecule, {'state_threshold': 0.2,
                      'n_mon': len(monomer)}
# Mixed dimer: ethene stacked with tetrafluoroethene.
def dimer_mix(distance, slide_y, slide_z):
    """Build an ethene / tetrafluoroethene mixed dimer; the fluorinated
    monomer is stacked *distance* along z and slid by *slide_y* / *slide_z*."""
    ethene = np.array([[ 0.6660120,  0.0000000, 0.0000000],
                       [-0.6660120,  0.0000000, 0.0000000],
                       [ 1.2279200,  0.9228100, 0.0000000],
                       [ 1.2279200, -0.9228100, 0.0000000],
                       [-1.2279200, -0.9228100, 0.0000000],
                       [-1.2279200,  0.9228100, 0.0000000]])
    ethene_symbols = ['C', 'C', 'H', 'H', 'H', 'H']

    tfe = np.array([[ 0.6624670117,  0.0000000000, 0.0000000000],
                    [-0.6624670117,  0.0000000000, 0.0000000000],
                    [ 1.3834661472,  1.0993897934, 0.0000000000],
                    [ 1.3834661472, -1.0993897934, 0.0000000000],
                    [-1.3834661472, -1.0993897934, 0.0000000000],
                    [-1.3834661472,  1.0993897934, 0.0000000000]])
    tfe_symbols = ['C', 'C', 'F', 'F', 'F', 'F']

    tfe[:, 2] += distance
    tfe[:, 1] += slide_y
    tfe[:, 0] += slide_z

    coordinates = np.vstack([ethene, tfe])
    symbols = ethene_symbols + tfe_symbols

    molecule = Structure(coordinates=coordinates,
                         symbols=symbols,
                         charge=0)

    return molecule, {'state_threshold': 0.4,
                      'n_mon': len(ethene)}
5bc343c63bd416a3914682e8b9480f3267efc3a6 | 1,776 | py | Python | twitter_monitor/twitter_bot_runner.py | coffeerightnow/TwitterMonitor | 91ce0647f0ec7a81bae6e5788d37221230a2d9b5 | [
"MIT"
] | null | null | null | twitter_monitor/twitter_bot_runner.py | coffeerightnow/TwitterMonitor | 91ce0647f0ec7a81bae6e5788d37221230a2d9b5 | [
"MIT"
] | null | null | null | twitter_monitor/twitter_bot_runner.py | coffeerightnow/TwitterMonitor | 91ce0647f0ec7a81bae6e5788d37221230a2d9b5 | [
"MIT"
] | null | null | null | import logging
from .config_loader import BackendConfig
import time
from .data_processing import TwitterStreamListener, TweetProcessor, TweetQueue
# load configuration
config = BackendConfig()

# setup Logging
logging.basicConfig(format='%(asctime)s %(message)s', level=config.log_level, filename='twitter_log.log')

# create TweetQueue to buffer the incoming tweets
tweet_queue = TweetQueue()

# start the twitter bot for collecting tweets into the tweet queue
twitter_bot = TwitterStreamListener(tweet_queue=tweet_queue, config=config)
tweet_processor = TweetProcessor(tweet_queue=tweet_queue, config=config)

# starting the tweet_processor thread for dequeuing the tweets and persist them to db.
tweet_processor.start()
twitter_bot.start_listener()

# recovery mechanism: if no tweet arrived for more than `recovery_time`
# seconds, assume the stream is stalled and rebuild listener + processor
while True:
    if tweet_queue.get_seconds_since_last_tweet() > config.twitter['recovery_time']:
        logging.critical(
            "restarting Twitter Listener and data Processor because more than "
            "recovery_time no Tweets arrived.")
        try:
            twitter_bot.twitterStream.disconnect()
            logging.critical("Twitter Stream disconntected...")
            tweet_processor.set_dead()
            tweet_processor.join()
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # can still stop the process during a restart attempt
        except Exception:
            logging.critical("Twitter Stream disconntect was not possible.")
        # restart with fresh queue, processor and listener
        tweet_queue = TweetQueue()
        tweet_processor = TweetProcessor(tweet_queue=tweet_queue, config=config)
        tweet_processor.start()
        twitter_bot = TwitterStreamListener(tweet_queue=tweet_queue, config=config)
        twitter_bot.start_listener()
    logging.debug("Time passed since last tweet arrived")
    logging.debug(tweet_queue.get_seconds_since_last_tweet())
    time.sleep(60)
| 37 | 105 | 0.739302 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 558 | 0.314189 |
5bc378b2e4d4522b4b61e8ed9d281f74655fd8f8 | 5,651 | py | Python | lib/bot/__init__.py | ItsZabbs/Pokedex-Bot | 0dfe38f939712ff0478eada1ff4c44226f6013c0 | [
"MIT"
] | null | null | null | lib/bot/__init__.py | ItsZabbs/Pokedex-Bot | 0dfe38f939712ff0478eada1ff4c44226f6013c0 | [
"MIT"
] | null | null | null | lib/bot/__init__.py | ItsZabbs/Pokedex-Bot | 0dfe38f939712ff0478eada1ff4c44226f6013c0 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
import dotenv
import os
from glob import glob
import traceback
from ..db import db
# Loading the environment variables
dotenv.load_dotenv()
token = os.getenv("BOT_TOKEN")
error_guild_id = os.getenv("GUILD_ID")
error_channel_id = os.getenv("CHANNEL_ID")
feedback_channel_id = os.getenv("FEEDBACK_ID")
guild_logs_id = os.getenv('GUILD_LOG_ID')

intents = discord.Intents.default()

# Allowing mentions in messages of the bot
mentions = discord.AllowedMentions(everyone=False, users=True, roles=False, replied_user=True)

# Cogs: module names of every cog file.  Uses os.path instead of
# path.split("\\"), which only worked with Windows path separators.
COGS = [os.path.splitext(os.path.basename(path))[0] for path in glob("./lib/cogs/*.py")]

# Owner IDS
OWNER_ID = 650664682046226432

# Prefix cache implementation (guild id -> list of prefixes)
prefix_cache = {}
async def get_prefix(user, message):
    """Resolve the command prefixes to use for *message*.

    DMs always use the default prefix "dexy"; guild prefixes are read
    from the database and memoised in ``prefix_cache`` (bounded to ~120
    entries, oldest evicted first).
    """
    if message.guild is None:
        prefix = "dexy"
        return commands.when_mentioned_or(prefix)(user, message)
    elif message.guild.id in prefix_cache:
        return commands.when_mentioned_or(*prefix_cache[message.guild.id])(user, message)
    else:
        prefix = db.field("SELECT Prefix FROM guilds WHERE GuildID = ?", message.guild.id)
        # db.field yields None when the guild has no stored prefix; the
        # original bare `except` silently masked that AttributeError.
        if prefix is not None:
            prefix = prefix.split(",")
        else:
            prefix = ['dexy']
        if len(prefix_cache) > 120:
            # evict the oldest entry (dicts preserve insertion order)
            prefix_cache.pop(next(iter(prefix_cache)))
        prefix_cache[message.guild.id] = prefix
        return commands.when_mentioned_or(*prefix_cache[message.guild.id])(user, message)
async def update():
    """Ensure every guild the bot is currently in has a row in `guilds`."""
    for guild in bot.guilds:
        try:
            db.execute("INSERT INTO guilds (GuildID) VALUES (?)", guild.id)
        except Exception:
            # best-effort insert; presumably fails when the row already
            # exists (GuildID constraint) — TODO confirm against schema.
            # Narrowed from a bare `except:`.
            pass
class Bot(commands.Bot):
    """Pokedex bot: wires prefixes, cogs, error/guild webhooks together."""

    def __init__(self):
        self.TOKEN = token
        self.ready = False          # flips True in on_ready; gates commands
        self.owner_id = OWNER_ID
        self.reconnect = True
        self.prefix_cache = prefix_cache
        self.feedback_webhook = feedback_channel_id
        super().__init__(case_insensitive=True, allowed_mentions=mentions, intents=intents,
                         command_prefix=get_prefix, strip_after_prefix=True,
                         owner_id=OWNER_ID, max_messages=None)

    def setup(self):
        """Load every cog in ./lib/cogs plus the jishaku debug extension."""
        for ext in os.listdir("./lib/cogs"):
            if ext.endswith(".py") and not ext.startswith("_"):
                try:
                    self.load_extension(f"lib.cogs.{ext[:-3]}")
                    print(f" {ext[:-3]} cog loaded")
                except Exception:
                    # keep loading remaining cogs; show the full traceback
                    desired_trace = traceback.format_exc()
                    print(desired_trace)
        self.load_extension('jishaku')
        print("jishaku loaded")
        # single completion message (was printed twice before)
        print("setup complete")

    def run(self, version):
        print("running setup...")
        self.setup()
        print("running bot...")
        # honour the reconnect flag from __init__ (was hard-coded True)
        super().run(self.TOKEN, reconnect=self.reconnect)

    async def process_commands(self, message):
        ctx = await self.get_context(message, cls=commands.Context)
        if ctx.command is not None:
            if not self.ready:
                await ctx.send("I'm not ready to receive commands. Please wait a few seconds.")
            else:
                await self.invoke(ctx)

    async def on_connect(self):
        # sync guild table, then resolve the error/guild-log webhooks
        await update()
        self.error_channel: discord.TextChannel = await self.fetch_channel(error_channel_id)
        self.error_webhook = await self.error_channel.webhooks()
        self.error_webhook = self.error_webhook[0]
        self.guild_log: discord.TextChannel = await self.fetch_channel(guild_logs_id)
        self.guild_log = await self.guild_log.webhooks()
        self.guild_log = self.guild_log[0]
        print("updated")
        print("bot connected")

    async def on_disconnect(self):
        print("bot disconnected")

    async def on_command_error(self, ctx: commands.Context, err):
        """Show the error to the user and mirror it to the error webhook."""
        embed = discord.Embed(title='An error occurred', colour=ctx.me.colour)
        embed.add_field(name='Error description', value=f"```\n{err}\n```", inline=False)
        embed.add_field(name='Still confused?', value='Join the [support server](https://discord.gg/FBFTYp7nnq) and ask about this there!')
        try:
            await ctx.send(embed=embed)
        except Exception:
            # cannot post in the channel — fall back to a DM
            await ctx.author.send(f"I cannot send embeds or messages in {ctx.channel.mention}!")
        try:
            embed = discord.Embed(title='Error', description=f'{err}')
            embed.add_field(name='Command used -', value=f'{ctx.message.content}', inline=False)
            await self.error_webhook.send(embed=embed)
        except Exception:
            # webhook reporting failed — surface the original error instead
            raise err

    async def on_guild_join(self, guild: discord.Guild):
        try:
            db.execute("INSERT INTO guilds (GuildID) VALUES (?)", guild.id)
        except Exception:
            # best-effort; presumably the row already exists — TODO confirm
            pass
        embed = discord.Embed(title='Guild added', description=f'ID : {guild.id}\n NAME : {guild.name}\n OWNERID : {guild.owner_id}')
        await self.guild_log.send(embed=embed)

    async def on_guild_remove(self, guild: discord.Guild):
        embed = discord.Embed(title='Guild left', description=f'ID : {guild.id}\n NAME : {guild.name}\n OWNERID : {guild.owner_id}')
        await self.guild_log.send(embed=embed)

    async def on_message(self, message: discord.Message):
        # ignore other bots entirely
        if message.author.bot:
            return
        return await super().on_message(message)

    async def on_ready(self):
        await update()
        self.ready = True
        print("bot ready")
bot = Bot()
| 35.993631 | 195 | 0.642895 | 4,050 | 0.716687 | 0 | 0 | 0 | 0 | 3,520 | 0.622899 | 1,329 | 0.23518 |
5bc3b76b4051d6f7137b9c441c17bb2450602f26 | 50,408 | py | Python | StackApp/env/lib/python2.7/site-packages/blueprint/backend/files.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | null | null | null | StackApp/env/lib/python2.7/site-packages/blueprint/backend/files.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | null | null | null | StackApp/env/lib/python2.7/site-packages/blueprint/backend/files.py | jonathanmusila/StackOverflow-Lite | a9a03f129592c6f741eb4d1e608ca2db0e40bf11 | [
"MIT"
] | null | null | null | """
Search for configuration files to include in the blueprint.
"""
import base64
from collections import defaultdict
import errno
import glob
import grp
import hashlib
import logging
import os.path
import pwd
import re
import stat
import subprocess
from blueprint import util
# An extra list of pathnames and MD5 sums that will be checked after no
# match is found in `dpkg`(1)'s list. If a pathname is given as the value
# then that file's contents will be hashed.
#
# Many of these files are distributed with packages and copied from
# `/usr/share` in the `postinst` program.
#
# XXX Update `blueprintignore`(5) if you make changes here.
MD5SUMS = {'/etc/adduser.conf': ['/usr/share/adduser/adduser.conf'],
'/etc/apparmor.d/tunables/home.d/ubuntu':
['2a88811f7b763daa96c20b20269294a4'],
'/etc/apt/apt.conf.d/00CDMountPoint':
['cb46a4e03f8c592ee9f56c948c14ea4e'],
'/etc/apt/apt.conf.d/00trustcdrom':
['a8df82e6e6774f817b500ee10202a968'],
'/etc/chatscripts/provider': ['/usr/share/ppp/provider.chatscript'],
'/etc/default/console-setup':
['0fb6cec686d0410993bdf17192bee7d6',
'b684fd43b74ac60c6bdafafda8236ed3',
'/usr/share/console-setup/console-setup'],
'/etc/default/grub': ['ee9df6805efb2a7d1ba3f8016754a119',
'ad9283019e54cedfc1f58bcc5e615dce'],
'/etc/default/irqbalance': ['7e10d364b9f72b11d7bf7bd1cfaeb0ff'],
'/etc/default/keyboard': ['06d66484edaa2fbf89aa0c1ec4989857'],
'/etc/default/locale': ['164aba1ef1298affaa58761647f2ceba',
'7c32189e775ac93487aa4a01dffbbf76'],
'/etc/default/rcS': ['/usr/share/initscripts/default.rcS'],
'/etc/environment': ['44ad415fac749e0c39d6302a751db3f2'],
'/etc/hosts.allow': ['8c44735847c4f69fb9e1f0d7a32e94c1'],
'/etc/hosts.deny': ['92a0a19db9dc99488f00ac9e7b28eb3d'],
'/etc/initramfs-tools/modules':
['/usr/share/initramfs-tools/modules'],
'/etc/inputrc': ['/usr/share/readline/inputrc'],
'/etc/iscsi/iscsid.conf': ['6c6fd718faae84a4ab1b276e78fea471'],
'/etc/kernel-img.conf': ['f1ed9c3e91816337aa7351bdf558a442'],
'/etc/ld.so.conf': ['4317c6de8564b68d628c21efa96b37e4'],
'/etc/ld.so.conf.d/nosegneg.conf':
['3c6eccf8f1c6c90eaf3eb486cc8af8a3'],
'/etc/networks': ['/usr/share/base-files/networks'],
'/etc/nsswitch.conf': ['/usr/share/base-files/nsswitch.conf'],
'/etc/pam.d/common-account': ['9d50c7dda6ba8b6a8422fd4453722324'],
'/etc/pam.d/common-auth': ['a326c972f4f3d20e5f9e1b06eef4d620'],
'/etc/pam.d/common-password': ['9f2fbf01b1a36a017b16ea62c7ff4c22'],
'/etc/pam.d/common-session': ['e2b72dd3efb2d6b29698f944d8723ab1'],
'/etc/pam.d/common-session-noninteractive':
['508d44b6daafbc3d6bd587e357a6ff5b'],
'/etc/pam.d/fingerprint-auth-ac':
['d851f318a16c32ed12f5b1cd55e99281'],
'/etc/pam.d/fingerprint-auth': ['d851f318a16c32ed12f5b1cd55e99281'],
'/etc/pam.d/password-auth-ac': ['e8aee610b8f5de9b6a6cdba8a33a4833'],
'/etc/pam.d/password-auth': ['e8aee610b8f5de9b6a6cdba8a33a4833'],
'/etc/pam.d/smartcard-auth-ac':
['dfa6696dc19391b065c45b9525d3ae55'],
'/etc/pam.d/smartcard-auth': ['dfa6696dc19391b065c45b9525d3ae55'],
'/etc/pam.d/system-auth-ac': ['e8aee610b8f5de9b6a6cdba8a33a4833'],
'/etc/pam.d/system-auth': ['e8aee610b8f5de9b6a6cdba8a33a4833'],
'/etc/ppp/chap-secrets': ['faac59e116399eadbb37644de6494cc4'],
'/etc/ppp/pap-secrets': ['698c4d412deedc43dde8641f84e8b2fd'],
'/etc/ppp/peers/provider': ['/usr/share/ppp/provider.peer'],
'/etc/profile': ['/usr/share/base-files/profile'],
'/etc/python/debian_config': ['7f4739eb8858d231601a5ed144099ac8'],
'/etc/rc.local': ['10fd9f051accb6fd1f753f2d48371890'],
'/etc/rsyslog.d/50-default.conf':
['/usr/share/rsyslog/50-default.conf'],
'/etc/security/opasswd': ['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/restorecond.conf':
['b5b371cb8c7b33e17bdd0d327fa69b60'],
'/etc/selinux/targeted/modules/semanage.trans.LOCK':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/modules/active/file_contexts.template':
['bfa4d9e76d88c7dc49ee34ac6f4c3925'],
'/etc/selinux/targeted/modules/active/file_contexts':
['1622b57a3b85db3112c5f71238c68d3e'],
'/etc/selinux/targeted/modules/active/users_extra':
['daab665152753da1bf92ca0b2af82999'],
'/etc/selinux/targeted/modules/active/base.pp':
['6540e8e1a9566721e70953a3cb946de4'],
'/etc/selinux/targeted/modules/active/modules/fetchmail.pp':
['0b0c7845f10170a76b9bd4213634cb43'],
'/etc/selinux/targeted/modules/active/modules/usbmuxd.pp':
['72a039c5108de78060651833a073dcd1'],
'/etc/selinux/targeted/modules/active/modules/pulseaudio.pp':
['d9c4f1abf8397d7967bb3014391f7b61'],
'/etc/selinux/targeted/modules/active/modules/screen.pp':
['c343b6c4df512b3ef435f06ed6cfd8b4'],
'/etc/selinux/targeted/modules/active/modules/cipe.pp':
['4ea2d39babaab8e83e29d13d7a83e8da'],
'/etc/selinux/targeted/modules/active/modules/rpcbind.pp':
['48cdaa5a31d75f95690106eeaaf855e3'],
'/etc/selinux/targeted/modules/active/modules/nut.pp':
['d8c81e82747c85d6788acc9d91178772'],
'/etc/selinux/targeted/modules/active/modules/mozilla.pp':
['405329d98580ef56f9e525a66adf7dc5'],
'/etc/selinux/targeted/modules/active/modules/openvpn.pp':
['110fe4c59b7d7124a7d33fda1f31428a'],
'/etc/selinux/targeted/modules/active/modules/denyhosts.pp':
['d12dba0c7eea142c16abd1e0424dfda4'],
'/etc/selinux/targeted/modules/active/modules/rhcs.pp':
['e7a6bf514011f39f277d401cd3d3186a'],
'/etc/selinux/targeted/modules/active/modules/radius.pp':
['a7380d93d0ac922364bc1eda85af80bf'],
'/etc/selinux/targeted/modules/active/modules/policykit.pp':
['1828a7a89c5c7a9cd0bd1b04b379e2c0'],
'/etc/selinux/targeted/modules/active/modules/varnishd.pp':
['260ef0797e6178de4edeeeca741e2374'],
'/etc/selinux/targeted/modules/active/modules/bugzilla.pp':
['c70402a459add46214ee370039398931'],
'/etc/selinux/targeted/modules/active/modules/java.pp':
['ac691d90e755a9a929c1c8095d721899'],
'/etc/selinux/targeted/modules/active/modules/courier.pp':
['d6eb2ef77d755fd49d61e48383867ccb'],
'/etc/selinux/targeted/modules/active/modules/userhelper.pp':
['787e5ca0ee1c9e744e9116837d73c2b9'],
'/etc/selinux/targeted/modules/active/modules/sssd.pp':
['aeb11626d9f34af08e9cd50b1b5751c7'],
'/etc/selinux/targeted/modules/active/modules/munin.pp':
['db2927d889a3dfbe439eb67dfdcba61d'],
'/etc/selinux/targeted/modules/active/modules/ppp.pp':
['7c6f91f4aae1c13a3d2a159a4c9b8553'],
'/etc/selinux/targeted/modules/active/modules/xfs.pp':
['6b3be69f181f28e89bfcffa032097dcb'],
'/etc/selinux/targeted/modules/active/modules/consolekit.pp':
['ef682e07a732448a12f2e93da946d655'],
'/etc/selinux/targeted/modules/active/modules/telnet.pp':
['43fd78d022e499bcb6392da33ed6e28d'],
'/etc/selinux/targeted/modules/active/modules/nagios.pp':
['9c9e482867dce0aa325884a50a023a83'],
'/etc/selinux/targeted/modules/active/modules/sysstat.pp':
['0fc4e6b3472ce5e8cfd0f3e785809552'],
'/etc/selinux/targeted/modules/active/modules/tor.pp':
['2c926e3c5b79879ed992b72406544394'],
'/etc/selinux/targeted/modules/active/modules/qpidd.pp':
['959d4763313e80d8a75bc009094ea085'],
'/etc/selinux/targeted/modules/active/modules/radvd.pp':
['a7636d3df0f431ad421170150e8a9d2e'],
'/etc/selinux/targeted/modules/active/modules/aiccu.pp':
['c0eafc1357cd0c07be4034c1a27ada98'],
'/etc/selinux/targeted/modules/active/modules/tgtd.pp':
['55da30386834e60a10b4bab582a1b689'],
'/etc/selinux/targeted/modules/active/modules/sectoolm.pp':
['6f8fba8d448da09f85a03622de295ba9'],
'/etc/selinux/targeted/modules/active/modules/unconfineduser.pp':
['0bc2f6faf3b38a657c4928ec7b611d7a'],
'/etc/selinux/targeted/modules/active/modules/sambagui.pp':
['31a5121c80a6114b25db4984bdf8d999'],
'/etc/selinux/targeted/modules/active/modules/mpd.pp':
['cdabce7844a227a81c2334dec0c49e9b'],
'/etc/selinux/targeted/modules/active/modules/hddtemp.pp':
['76d85610a7e198c82406d850ccd935e1'],
'/etc/selinux/targeted/modules/active/modules/clamav.pp':
['f8f5b60e3f5b176810ea0666b989f63d'],
'/etc/selinux/targeted/modules/active/modules/tvtime.pp':
['886dc0a6e9ebcbb6787909851e7c209f'],
'/etc/selinux/targeted/modules/active/modules/cgroup.pp':
['9e1cd610b6fde0e9b42cabd7f994db46'],
'/etc/selinux/targeted/modules/active/modules/rshd.pp':
['e39cec5e9ade8a619ecb91b85a351408'],
'/etc/selinux/targeted/modules/active/modules/roundup.pp':
['133b9b3b2f70422953851e18d6c24276'],
'/etc/selinux/targeted/modules/active/modules/virt.pp':
['9ae34fca60c651c10298797c1260ced0'],
'/etc/selinux/targeted/modules/active/modules/asterisk.pp':
['f823fdcb2c6df4ddde374c9edb11ef26'],
'/etc/selinux/targeted/modules/active/modules/livecd.pp':
['8972e6ef04f490b8915e7983392b96ce'],
'/etc/selinux/targeted/modules/active/modules/netlabel.pp':
['91fc83e5798bd271742823cbb78c17ff'],
'/etc/selinux/targeted/modules/active/modules/qemu.pp':
['e561673d5f9e5c19bcae84c1641fa4a7'],
'/etc/selinux/targeted/modules/active/modules/unconfined.pp':
['3acd5dceb6b7a71c32919c29ef920785'],
'/etc/selinux/targeted/modules/active/modules/postgresql.pp':
['3ecc9f2c7b911fa37d8ab6cc1c6b0ea7'],
'/etc/selinux/targeted/modules/active/modules/apache.pp':
['c0089e4472399e9bc5237b1e0485ac39'],
'/etc/selinux/targeted/modules/active/modules/abrt.pp':
['09e212789d19f41595d7952499236a0c'],
'/etc/selinux/targeted/modules/active/modules/rsync.pp':
['e2567e8716c116ea6324c77652c97137'],
'/etc/selinux/targeted/modules/active/modules/git.pp':
['7904fd9fbae924be5377ccd51036248e'],
'/etc/selinux/targeted/modules/active/modules/amanda.pp':
['594eddbbe3b4530e79702fc6a882010e'],
'/etc/selinux/targeted/modules/active/modules/cvs.pp':
['62cf7b7d58f507cc9f507a6c303c8020'],
'/etc/selinux/targeted/modules/active/modules/chronyd.pp':
['a4ff3e36070d461771230c4019b23440'],
'/etc/selinux/targeted/modules/active/modules/gpm.pp':
['ed3f26e774be81c2cbaaa87dcfe7ae2d'],
'/etc/selinux/targeted/modules/active/modules/modemmanager.pp':
['840d4da9f32a264436f1b22d4d4a0b2a'],
'/etc/selinux/targeted/modules/active/modules/podsleuth.pp':
['67e659e9554bc35631ee829b5dc71647'],
'/etc/selinux/targeted/modules/active/modules/publicfile.pp':
['0f092d92c326444dc9cee78472c56655'],
'/etc/selinux/targeted/modules/active/modules/postfix.pp':
['a00647ad811c22810c76c1162a97e74b'],
'/etc/selinux/targeted/modules/active/modules/exim.pp':
['8c3cd1fbd8f68e80ac7707f243ac1911'],
'/etc/selinux/targeted/modules/active/modules/telepathy.pp':
['9b32f699beb6f9c563f06f6b6d76732c'],
'/etc/selinux/targeted/modules/active/modules/amtu.pp':
['1b87c9fef219244f80b1f8f57a2ce7ea'],
'/etc/selinux/targeted/modules/active/modules/bitlbee.pp':
['cf0973c8fff61577cf330bb74ef75eed'],
'/etc/selinux/targeted/modules/active/modules/memcached.pp':
['0146491b4ab9fbd2854a7e7fb2092168'],
'/etc/selinux/targeted/modules/active/modules/sandbox.pp':
['82502d6d11b83370d1a77343f20d669f'],
'/etc/selinux/targeted/modules/active/modules/dictd.pp':
['6119d37987ea968e90a39d96866e5805'],
'/etc/selinux/targeted/modules/active/modules/pingd.pp':
['16c40af7785c8fa9d40789284ce8fbb9'],
'/etc/selinux/targeted/modules/active/modules/milter.pp':
['acaec7d2ee341e97ac5e345b55f6c7ae'],
'/etc/selinux/targeted/modules/active/modules/snort.pp':
['25f360aa5dec254a8fc18262bbe40510'],
'/etc/selinux/targeted/modules/active/modules/cups.pp':
['5323d417895d5ab508048e2bc45367bf'],
'/etc/selinux/targeted/modules/active/modules/rdisc.pp':
['5bed79cb1f4d5a2b822d6f8dbf53fe97'],
'/etc/selinux/targeted/modules/active/modules/rlogin.pp':
['6f88cc86985b4bc79d4b1afbffb1a732'],
'/etc/selinux/targeted/modules/active/modules/openct.pp':
['884f078f5d12f7b1c75cf011a94746e1'],
'/etc/selinux/targeted/modules/active/modules/dbskk.pp':
['caa93f24bfeede892fd97c59ee8b61da'],
'/etc/selinux/targeted/modules/active/modules/bluetooth.pp':
['ce4f1b34168c537b611783033316760e'],
'/etc/selinux/targeted/modules/active/modules/gpsd.pp':
['dd15485b8c6e5aeac018ddbe0948464c'],
'/etc/selinux/targeted/modules/active/modules/tuned.pp':
['5fc9de20402245e4a1a19c5b31101d06'],
'/etc/selinux/targeted/modules/active/modules/piranha.pp':
['fcedf8588c027633bedb76b598b7586f'],
'/etc/selinux/targeted/modules/active/modules/vhostmd.pp':
['0ca7152ed8a0ae393051876fe89ed657'],
'/etc/selinux/targeted/modules/active/modules/corosync.pp':
['20518dface3d23d7408dd56a51c8e6e1'],
'/etc/selinux/targeted/modules/active/modules/clogd.pp':
['533994a32ecf847a3162675e171c847c'],
'/etc/selinux/targeted/modules/active/modules/samba.pp':
['c7cd9b91a5ba4f0744e3f55a800f2831'],
'/etc/selinux/targeted/modules/active/modules/howl.pp':
['fef7dd76a97921c3e5e0e66fbac15091'],
'/etc/selinux/targeted/modules/active/modules/shutdown.pp':
['55f36d9820dcd19c66729d446d3ce6b2'],
'/etc/selinux/targeted/modules/active/modules/oddjob.pp':
['54d59b40e7bc0dc0dee3882e6c0ce9f3'],
'/etc/selinux/targeted/modules/active/modules/pcscd.pp':
['e728f332850dfcb5637c4e8f220af2fc'],
'/etc/selinux/targeted/modules/active/modules/canna.pp':
['de4f1a3ada6f9813da36febc31d2a282'],
'/etc/selinux/targeted/modules/active/modules/arpwatch.pp':
['0ddc328fa054f363a035ba44ec116514'],
'/etc/selinux/targeted/modules/active/modules/seunshare.pp':
['64844bbf79ee23e087a5741918f3a7ad'],
'/etc/selinux/targeted/modules/active/modules/rhgb.pp':
['c9630cc5830fcb4b775985c5740f5a71'],
'/etc/selinux/targeted/modules/active/modules/prelude.pp':
['2b85511c571c19751bb79b288267661c'],
'/etc/selinux/targeted/modules/active/modules/portmap.pp':
['231abe579c0370f49cac533c6057792b'],
'/etc/selinux/targeted/modules/active/modules/logadm.pp':
['980b1345ef8944a90b6efdff0c8b3278'],
'/etc/selinux/targeted/modules/active/modules/ptchown.pp':
['987fc8a6ff50ef7eed0edc79f91b1ec5'],
'/etc/selinux/targeted/modules/active/modules/vmware.pp':
['8cf31ec8abd75f2a6c56857146caf5a1'],
'/etc/selinux/targeted/modules/active/modules/portreserve.pp':
['0354f017b429dead8de0d143f7950fcc'],
'/etc/selinux/targeted/modules/active/modules/awstats.pp':
['c081d3168b28765182bb4ec937b4c0b1'],
'/etc/selinux/targeted/modules/active/modules/tmpreaper.pp':
['ac0173dd09a54a87fdcb42d3a5e29442'],
'/etc/selinux/targeted/modules/active/modules/postgrey.pp':
['68013352c07570ac38587df9fb7e88ee'],
'/etc/selinux/targeted/modules/active/modules/tftp.pp':
['a47fb7872bfb06d80c8eef969d91e6f9'],
'/etc/selinux/targeted/modules/active/modules/rgmanager.pp':
['1cee78e1ff3f64c4d013ce7b820e534b'],
'/etc/selinux/targeted/modules/active/modules/aisexec.pp':
['95e70fd35e9cb8284488d6bf970815b7'],
'/etc/selinux/targeted/modules/active/modules/xguest.pp':
['d8df4b61df93008cd594f98c852d4cba'],
'/etc/selinux/targeted/modules/active/modules/cobbler.pp':
['6978d8b37b1da384130db5c5c2144175'],
'/etc/selinux/targeted/modules/active/modules/mysql.pp':
['d147af479531042f13e70d72bd58a0e9'],
'/etc/selinux/targeted/modules/active/modules/amavis.pp':
['7fc17b2f47c1d8226a9003df1ef67bb5'],
'/etc/selinux/targeted/modules/active/modules/fprintd.pp':
['d58f18b496f69a74ece1f1b1b9432405'],
'/etc/selinux/targeted/modules/active/modules/nis.pp':
['d696b167de5817226298306c79761fa2'],
'/etc/selinux/targeted/modules/active/modules/squid.pp':
['3f9e075e79ec5aa59609a7ccebce0afe'],
'/etc/selinux/targeted/modules/active/modules/smokeping.pp':
['98b83cac4488d7dd18c479b62dd3cf15'],
'/etc/selinux/targeted/modules/active/modules/ktalk.pp':
['afe14e94861782679305c91da05e7d5e'],
'/etc/selinux/targeted/modules/active/modules/certwatch.pp':
['bf13c9a642ded8354ba26d5462ddd60c'],
'/etc/selinux/targeted/modules/active/modules/games.pp':
['3bcd17c07699d58bd436896e75a24520'],
'/etc/selinux/targeted/modules/active/modules/zabbix.pp':
['5445ccfec7040ff1ccf3abf4de2e9a3c'],
'/etc/selinux/targeted/modules/active/modules/rwho.pp':
['710e29c8e621de6af9ca74869624b9f0'],
'/etc/selinux/targeted/modules/active/modules/w3c.pp':
['aea6b9518cb3fa904cc7ee82239b07c2'],
'/etc/selinux/targeted/modules/active/modules/cyphesis.pp':
['dccb3f009cd56c5f8856861047d7f2ff'],
'/etc/selinux/targeted/modules/active/modules/kismet.pp':
['f2d984e007275d35dd03a2d59ade507e'],
'/etc/selinux/targeted/modules/active/modules/zosremote.pp':
['77a2681c4b1c3c001faeca9874b58ecf'],
'/etc/selinux/targeted/modules/active/modules/pads.pp':
['76b7413009a202e228ee08c5511f3f42'],
'/etc/selinux/targeted/modules/active/modules/avahi.pp':
['b59670ba623aba37ab8f0f1f1127893a'],
'/etc/selinux/targeted/modules/active/modules/apcupsd.pp':
['81fae28232730a49b7660797ef4354c3'],
'/etc/selinux/targeted/modules/active/modules/usernetctl.pp':
['22850457002a48041d885c0d74fbd934'],
'/etc/selinux/targeted/modules/active/modules/finger.pp':
['5dd6b44358bbfabfdc4f546e1ed34370'],
'/etc/selinux/targeted/modules/active/modules/dhcp.pp':
['7e63b07b64848a017eec5d5f6b88f22e'],
'/etc/selinux/targeted/modules/active/modules/xen.pp':
['67086e8e94bdaab8247ac4d2e23162d1'],
'/etc/selinux/targeted/modules/active/modules/plymouthd.pp':
['1916027e7c9f28430fa2ac30334e8964'],
'/etc/selinux/targeted/modules/active/modules/uucp.pp':
['5bec7a345a314a37b4a2227bdfa926f1'],
'/etc/selinux/targeted/modules/active/modules/daemontools.pp':
['aad7633adfc8b04e863b481deebaf14a'],
'/etc/selinux/targeted/modules/active/modules/kdumpgui.pp':
['66e08b4187623fa1c535972a35ec058c'],
'/etc/selinux/targeted/modules/active/modules/privoxy.pp':
['f13c986051659fa900786ea54a59ceae'],
'/etc/selinux/targeted/modules/active/modules/unprivuser.pp':
['a0d128b495a6ea5da72c849ac63c5848'],
'/etc/selinux/targeted/modules/active/modules/ada.pp':
['a75fd52c873e2c9326ad87f7515a664f'],
'/etc/selinux/targeted/modules/active/modules/lircd.pp':
['3cc5cc5b24d40416f9d630a80005d33b'],
'/etc/selinux/targeted/modules/active/modules/openoffice.pp':
['522c3ee13bc37cbe9903d00f0cbccd1d'],
'/etc/selinux/targeted/modules/active/modules/puppet.pp':
['9da4c553f40f3dea876171e672168044'],
'/etc/selinux/targeted/modules/active/modules/wine.pp':
['31c470eabd98c5a5dbc66ba52ad64de0'],
'/etc/selinux/targeted/modules/active/modules/ulogd.pp':
['065551ea63de34a7257ecec152f61552'],
'/etc/selinux/targeted/modules/active/modules/mplayer.pp':
['f889dbfa3d9ef071d8e569def835a2f3'],
'/etc/selinux/targeted/modules/active/modules/ftp.pp':
['75a9f3563903eb8126ffbcc9277e1d8c'],
'/etc/selinux/targeted/modules/active/modules/gnome.pp':
['b859e2d45123f60ff27a90cdb0f40e1b'],
'/etc/selinux/targeted/modules/active/modules/ethereal.pp':
['8963c6b80025b27850f0cdf565e5bd54'],
'/etc/selinux/targeted/modules/active/modules/iscsi.pp':
['7786cb4a84889010751b4d89c72a2956'],
'/etc/selinux/targeted/modules/active/modules/chrome.pp':
['cb44c1c7b13cc04c07c4e787a259b63f'],
'/etc/selinux/targeted/modules/active/modules/guest.pp':
['308d614589af73e39a22e5c741e9eecb'],
'/etc/selinux/targeted/modules/active/modules/inn.pp':
['8d60592dcd3bf4d2fa97f0fefa9374ca'],
'/etc/selinux/targeted/modules/active/modules/gitosis.pp':
['21c79a711157224bebba0a2cccbe8881'],
'/etc/selinux/targeted/modules/active/modules/ksmtuned.pp':
['8f985e777c206d2bde3fc2ac6a28cd24'],
'/etc/selinux/targeted/modules/active/modules/sosreport.pp':
['9b4780d27555e94335f80a0bb2ab4f14'],
'/etc/selinux/targeted/modules/active/modules/ipsec.pp':
['68cacb8c78796957fb4a181390033b16'],
'/etc/selinux/targeted/modules/active/modules/comsat.pp':
['1cecb3f5cbe24251017908e14838ee2a'],
'/etc/selinux/targeted/modules/active/modules/gpg.pp':
['75358ddabb045e91010d80f1ab68307a'],
'/etc/selinux/targeted/modules/active/modules/gnomeclock.pp':
['a4e74df48faab3af8f4df0fa16c65c7e'],
'/etc/selinux/targeted/modules/active/modules/sasl.pp':
['5ba9be813a7dd4236fc2d37bc17c5052'],
'/etc/selinux/targeted/modules/active/modules/vpn.pp':
['32ae00c287432ae5ad4f8affbc9e44fe'],
'/etc/selinux/targeted/modules/active/modules/accountsd.pp':
['308057b48c6d70a45e5a603dbe625c2d'],
'/etc/selinux/targeted/modules/active/modules/devicekit.pp':
['1f5a8f12ebeebfed2cfeb3ee4648dd13'],
'/etc/selinux/targeted/modules/active/modules/psad.pp':
['b02f11705249c93735f019f5b97fdf7b'],
'/etc/selinux/targeted/modules/active/modules/mono.pp':
['8bba1cc6826e8300c140f9c393ad07e9'],
'/etc/selinux/targeted/modules/active/modules/cachefilesd.pp':
['82b93ba87b5920ecc8a7388f4cf8ea43'],
'/etc/selinux/targeted/modules/active/modules/usbmodules.pp':
['20c3a57da3c1311a75a63f1c6ae91bf3'],
'/etc/selinux/targeted/modules/active/modules/certmonger.pp':
['b9fe8ba6abc5204cd8eec546f5614ff5'],
'/etc/selinux/targeted/modules/active/modules/pegasus.pp':
['bb0ec4379c28b196d1794d7310111d98'],
'/etc/selinux/targeted/modules/active/modules/ntop.pp':
['99b46fe44ccf3c4e045dbc73d2a88f59'],
'/etc/selinux/targeted/modules/active/modules/zebra.pp':
['12adcaae458d18f650578ce25e10521a'],
'/etc/selinux/targeted/modules/active/modules/soundserver.pp':
['583abd9ccef70279bff856516974d471'],
'/etc/selinux/targeted/modules/active/modules/stunnel.pp':
['2693ac1bf08287565c3b4e58d0f9ea55'],
'/etc/selinux/targeted/modules/active/modules/ldap.pp':
['039baf0976f316c3f209a5661174a72e'],
'/etc/selinux/targeted/modules/active/modules/fail2ban.pp':
['ce13513c427ff140bf988b01bd52e886'],
'/etc/selinux/targeted/modules/active/modules/spamassassin.pp':
['e02232992676b0e1279c54bfeea290e3'],
'/etc/selinux/targeted/modules/active/modules/procmail.pp':
['d5c58e90fac452a1a6d68cc496e7f1ae'],
'/etc/selinux/targeted/modules/active/modules/afs.pp':
['6e7a4bf08dc7fa5a0f97577b913267ad'],
'/etc/selinux/targeted/modules/active/modules/ricci.pp':
['8b1d44245be204907c82c3580a43901d'],
'/etc/selinux/targeted/modules/active/modules/qmail.pp':
['ea08eb2172c275598d4f85c9b78182cd'],
'/etc/selinux/targeted/modules/active/modules/ccs.pp':
['cad223d57f431e2f88a1d1542c2ac504'],
'/etc/selinux/targeted/modules/active/modules/audioentropy.pp':
['19f6fd5e3ee2a3726a952631e993a133'],
'/etc/selinux/targeted/modules/active/modules/ncftool.pp':
['c15f4833a21e9c8cd1237ee568aadcf3'],
'/etc/selinux/targeted/modules/active/modules/nx.pp':
['3677983206101cfcd2182e180ef3876b'],
'/etc/selinux/targeted/modules/active/modules/rtkit.pp':
['0eaae15f4c12522270b26769487a06e0'],
'/etc/selinux/targeted/modules/active/modules/ntp.pp':
['141339ee3372e07d32575c6777c8e466'],
'/etc/selinux/targeted/modules/active/modules/likewise.pp':
['b5f0d18f8b601e102fd9728fbb309692'],
'/etc/selinux/targeted/modules/active/modules/aide.pp':
['69600bc8a529f8128666a563c7409929'],
'/etc/selinux/targeted/modules/active/modules/nslcd.pp':
['5c87b1c80bdd8bbf60c33ef51a765a93'],
'/etc/selinux/targeted/modules/active/modules/slocate.pp':
['fdea88c374382f3d652a1ac529fbd189'],
'/etc/selinux/targeted/modules/active/modules/execmem.pp':
['44cc2d117e3bf1a33d4e3516aaa7339d'],
'/etc/selinux/targeted/modules/active/modules/cpufreqselector.pp':
['7da9c9690dc4f076148ef35c3644af13'],
'/etc/selinux/targeted/modules/active/modules/cmirrord.pp':
['084b532fa5ccd6775c483d757bcd0920'],
'/etc/selinux/targeted/modules/active/modules/bind.pp':
['5560f5706c8c8e83d8a2ac03a85b93fb'],
'/etc/selinux/targeted/modules/active/modules/uml.pp':
['a0841bc9ffca619fe5d44c557b70d258'],
'/etc/selinux/targeted/modules/active/modules/staff.pp':
['bdf16ee0fa0721770aa31c52e45227c3'],
'/etc/selinux/targeted/modules/active/modules/certmaster.pp':
['bc589a4f0dd49a05d52b9ffda7bdd149'],
'/etc/selinux/targeted/modules/active/modules/webalizer.pp':
['c99ccad469be3c901ede9da9a87e44b2'],
'/etc/selinux/targeted/modules/active/modules/hal.pp':
['c75783ec2dd49d437a242e0c69c31c96'],
'/etc/selinux/targeted/modules/active/modules/kdump.pp':
['d731820c7b5bb711566ea23970106b7a'],
'/etc/selinux/targeted/modules/active/modules/firewallgui.pp':
['ee3522a0072989ed08f70b03f7fd69d9'],
'/etc/selinux/targeted/modules/active/modules/tcpd.pp':
['b1f7db819812da14c4e836a9d9e79980'],
'/etc/selinux/targeted/modules/active/modules/mailman.pp':
['4116cbe11d943a076dd06cea91993745'],
'/etc/selinux/targeted/modules/active/modules/smartmon.pp':
['45d6440b436d8ac3f042e80c392dd672'],
'/etc/selinux/targeted/modules/active/modules/smoltclient.pp':
['dcfd6ecd62ee7191abda39315ec6ef1b'],
'/etc/selinux/targeted/modules/active/modules/kerberos.pp':
['936533081cfbe28eb9145fde86edb4f8'],
'/etc/selinux/targeted/modules/active/modules/lockdev.pp':
['e2da620d3272f296dd90bff8b921d203'],
'/etc/selinux/targeted/modules/active/modules/automount.pp':
['a06d3d617c6d8c29e29ce3fb0db48c9c'],
'/etc/selinux/targeted/modules/active/modules/webadm.pp':
['4ac9b2f95f8d8218ec93f001995fd8ba'],
'/etc/selinux/targeted/modules/active/modules/pyzor.pp':
['c2b00c08d77d7d5a8588dd82c489e354'],
'/etc/selinux/targeted/modules/active/modules/rssh.pp':
['aacef6c826e9d699e84a1dd564b68105'],
'/etc/selinux/targeted/modules/active/modules/nsplugin.pp':
['0c90d308f5e956900150eb6ed84b0b54'],
'/etc/selinux/targeted/modules/active/modules/lpd.pp':
['5bf17a46aa2d3e2ecc0daffcf092054e'],
'/etc/selinux/targeted/modules/active/modules/dcc.pp':
['84749af337d72ba6bbbe54b013c6c62c'],
'/etc/selinux/targeted/modules/active/modules/irc.pp':
['42897f214251c7ca9bc04379c4abff5e'],
'/etc/selinux/targeted/modules/active/modules/icecast.pp':
['962c81fc8ef5fd49c925a2249d229d1d'],
'/etc/selinux/targeted/modules/active/modules/dnsmasq.pp':
['ec4a8a50eb5806e450d97a77cbe8a8b4'],
'/etc/selinux/targeted/modules/active/modules/jabber.pp':
['5a528d52f7337d44bfc867333f2b1921'],
'/etc/selinux/targeted/modules/active/modules/remotelogin.pp':
['68c22a0bc6e4d5031153cf10d75ba76a'],
'/etc/selinux/targeted/modules/active/modules/boinc.pp':
['a70386e9ffdaccd04cbb565e6fe5c822'],
'/etc/selinux/targeted/modules/active/modules/mrtg.pp':
['7e6f395e72768d350d259c15d22a1cbb'],
'/etc/selinux/targeted/modules/active/modules/snmp.pp':
['fc5166e3066504601037054874fe0487'],
'/etc/selinux/targeted/modules/active/modules/cyrus.pp':
['d2e792bf111ce4a6ffdb87fe11d89d16'],
'/etc/selinux/targeted/modules/active/modules/dovecot.pp':
['b716de8b77f0dfeb9212d5cf36bddfa1'],
'/etc/selinux/targeted/modules/active/modules/cdrecord.pp':
['24c0325480e2f1d6cf1ce31c25d5f10a'],
'/etc/selinux/targeted/modules/active/modules/calamaris.pp':
['c7ec43f01369524db32249fb755f4e7f'],
'/etc/selinux/targeted/modules/active/modules/kerneloops.pp':
['2493d3308dfcd34e94308af9d5c888c3'],
'/etc/selinux/targeted/modules/active/modules/razor.pp':
['06425e50a31f14cec090c30e05fb9827'],
'/etc/selinux/targeted/modules/active/netfilter_contexts':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/modules/active/seusers.final':
['fdf1cdf1d373e4583ca759617a1d2af3'],
'/etc/selinux/targeted/modules/active/file_contexts.homedirs':
['d7c4747704e9021ec2e16c7139fedfd9'],
'/etc/selinux/targeted/modules/active/commit_num':
['c08cc266624f6409b01432dac9576ab0'],
'/etc/selinux/targeted/modules/active/policy.kern':
['5398a60f820803049b5bb7d90dd6196b'],
'/etc/selinux/targeted/modules/active/homedir_template':
['682a31c8036aaf9cf969093d7162960a'],
'/etc/selinux/targeted/modules/semanage.read.LOCK':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/contexts/failsafe_context':
['940b12538b676287b3c33e68426898ac'],
'/etc/selinux/targeted/contexts/virtual_domain_context':
['1e28f1b8e58e56a64c852bd77f57d121'],
'/etc/selinux/targeted/contexts/removable_context':
['e56a6b14d2bed27405d2066af463df9f'],
'/etc/selinux/targeted/contexts/netfilter_contexts':
['d41d8cd98f00b204e9800998ecf8427e'],
'/etc/selinux/targeted/contexts/userhelper_context':
['53441d64f9bc6337e3aac33f05d0954c'],
'/etc/selinux/targeted/contexts/virtual_image_context':
['b21a69d3423d2e085d5195e25922eaa1'],
'/etc/selinux/targeted/contexts/securetty_types':
['ee2445f940ed1b33e778a921cde8ad9e'],
'/etc/selinux/targeted/contexts/default_type':
['d0f63fea19ee82e5f65bdbb1de899c5d'],
'/etc/selinux/targeted/contexts/dbus_contexts':
['b1c42884fa5bdbde53d64cff469374fd'],
'/etc/selinux/targeted/contexts/files/file_contexts':
['1622b57a3b85db3112c5f71238c68d3e'],
'/etc/selinux/targeted/contexts/files/file_contexts.homedirs':
['d7c4747704e9021ec2e16c7139fedfd9'],
'/etc/selinux/targeted/contexts/files/media':
['3c867677892c0a15dc0b9e9811cc2c49'],
'/etc/selinux/targeted/contexts/initrc_context':
['99866a62735a38b2bf839233c1a1689d'],
'/etc/selinux/targeted/contexts/x_contexts':
['9dde3f5e3ddac42b9e99a4613c972b97'],
'/etc/selinux/targeted/contexts/customizable_types':
['68be87281cf3d40cb2c4606cd2b1ea2b'],
'/etc/selinux/targeted/contexts/users/xguest_u':
['e26010a418df86902332c57434370246'],
'/etc/selinux/targeted/contexts/users/unconfined_u':
['ee88bed48d9601ff2b11f68f97d361ac'],
'/etc/selinux/targeted/contexts/users/staff_u':
['f3412f7cbf441078a9de40fcaab93254'],
'/etc/selinux/targeted/contexts/users/root':
['328e08341d1ff9296573dd43c355e283'],
'/etc/selinux/targeted/contexts/users/user_u':
['2fe911f440282fda0590cd99540da579'],
'/etc/selinux/targeted/contexts/users/guest_u':
['61e7e7e7403b2eac30e312342e66e4cd'],
'/etc/selinux/targeted/contexts/default_contexts':
['0888c75fc814058bb3c01ef58f7a1f47'],
'/etc/selinux/targeted/policy/policy.24':
['5398a60f820803049b5bb7d90dd6196b'],
'/etc/selinux/targeted/setrans.conf':
['ae70362b6fa2af117bd6e293ce232069'],
'/etc/selinux/targeted/seusers':
['fdf1cdf1d373e4583ca759617a1d2af3'],
'/etc/selinux/config': ['91081ef6d958e79795d0255d7c374a56'],
'/etc/selinux/restorecond_user.conf':
['4e1b5b5e38c660f87d5a4f7d3a998c29'],
'/etc/selinux/semanage.conf': ['f33b524aef1a4df2a3d0eecdda041a5c'],
'/etc/sgml/xml-core.cat': ['bcd454c9bf55a3816a134f9766f5928f'],
'/etc/shells': ['0e85c87e09d716ecb03624ccff511760'],
'/etc/ssh/sshd_config': ['e24f749808133a27d94fda84a89bb27b',
'8caefdd9e251b7cc1baa37874149a870',
'874fafed9e745b14e5fa8ae71b82427d'],
'/etc/sudoers': ['02f74ccbec48997f402a063a172abb48'],
'/etc/ufw/after.rules': ['/usr/share/ufw/after.rules'],
'/etc/ufw/after6.rules': ['/usr/share/ufw/after6.rules'],
'/etc/ufw/before.rules': ['/usr/share/ufw/before.rules'],
'/etc/ufw/before6.rules': ['/usr/share/ufw/before6.rules'],
'/etc/ufw/ufw.conf': ['/usr/share/ufw/ufw.conf']}
# Some MD5SUMS override entries are pathnames (they begin with a slash)
# rather than literal checksums.  Replace each such entry with the MD5 sum
# of that file's current contents; the lists are mutated in place so
# MD5SUMS itself picks up the computed sums.  Unreadable or missing
# override files are simply left as pathnames.
for _pathname, _overrides in MD5SUMS.iteritems():
    for _index, _value in enumerate(_overrides):
        if '/' != _value[0]:
            continue
        try:
            _overrides[_index] = hashlib.md5(
                open(_value).read()).hexdigest()
        except IOError:
            pass
def files(b, r):
    """
    Walk `/etc` and add every interesting configuration file to `b`.

    `b` is a blueprint-like object providing `add_file`, `add_service`,
    and `add_service_package`; `r` is a rules object providing
    `ignore_file` and `ignore_service` (signatures assumed from the calls
    below).  Special files, ignored files, files unchanged from their
    packaged versions, and files sharing a ctime with many siblings are
    all skipped.
    """
    logging.info('searching for configuration files')

    # Visit every file in `/etc` except those on the exclusion list above.
    for dirpath, dirnames, filenames in os.walk('/etc'):

        # Determine if this entire directory should be ignored by default.
        ignored = r.ignore_file(dirpath)

        # Collect up the full pathname to each file, `lstat` them all, and
        # note which ones will probably be ignored.
        files = []
        for filename in filenames:
            pathname = os.path.join(dirpath, filename)
            try:
                files.append((pathname,
                              os.lstat(pathname),
                              r.ignore_file(pathname, ignored)))
            except OSError as e:
                # Most likely EACCES while running unprivileged.
                logging.warning('{0} caused {1} - try running as root'.
                                format(pathname, errno.errorcode[e.errno]))

        # Track the ctime of each file in this directory.  Weed out false
        # positives by ignoring files with common ctimes.
        ctimes = defaultdict(lambda: 0)

        # Map the ctimes of each directory entry that isn't being ignored.
        for pathname, s, ignored in files:
            if not ignored:
                ctimes[s.st_ctime] += 1
        for dirname in dirnames:
            try:
                ctimes[os.lstat(os.path.join(dirpath, dirname)).st_ctime] += 1
            except OSError:
                pass

        for pathname, s, ignored in files:

            # Always ignore block special files, character special files,
            # pipes, and sockets.  They end up looking like deadlocks.
            if stat.S_ISBLK(s.st_mode) \
            or stat.S_ISCHR(s.st_mode) \
            or stat.S_ISFIFO(s.st_mode) \
            or stat.S_ISSOCK(s.st_mode):
                continue

            # Make sure this pathname will actually be able to be included
            # in the blueprint.  This is a bit of a cop-out since the file
            # could be important but at least it's not a crashing bug.
            try:
                pathname = unicode(pathname)
            except UnicodeDecodeError:
                logging.warning('{0} not UTF-8 - skipping it'.
                                format(repr(pathname)[1:-1]))
                continue

            # Ignore ignored files and files that share their ctime with other
            # files in the directory.  This is a very strong indication that
            # the file is original to the system and should be ignored.
            if ignored \
                or 1 < ctimes[s.st_ctime] and r.ignore_file(pathname, True):
                continue

            # Check for a Mustache template and an optional shell script
            # that templatize this file.
            try:
                template = open(
                    '{0}.blueprint-template.mustache'.format(pathname)).read()
            except IOError:
                template = None
            try:
                data = open(
                    '{0}.blueprint-template.sh'.format(pathname)).read()
            except IOError:
                data = None

            # The content is used even for symbolic links to determine whether
            # it has changed from the packaged version.
            try:
                content = open(pathname).read()
            except IOError:
                #logging.warning('{0} not readable'.format(pathname))
                continue

            # Ignore files that are unchanged from their packaged version.
            if _unchanged(pathname, content, r):
                continue

            # Resolve the rest of the file's metadata from the
            # `/etc/passwd` and `/etc/group` databases.
            try:
                pw = pwd.getpwuid(s.st_uid)
                owner = pw.pw_name
            except KeyError:
                # No passwd entry - fall back to the numeric UID.
                owner = s.st_uid
            try:
                gr = grp.getgrgid(s.st_gid)
                group = gr.gr_name
            except KeyError:
                # No group entry - fall back to the numeric GID.
                group = s.st_gid
            mode = '{0:o}'.format(s.st_mode)

            # A symbolic link's content is the link target.
            if stat.S_ISLNK(s.st_mode):
                content = os.readlink(pathname)

                # Ignore symbolic links providing backwards compatibility
                # between SystemV init and Upstart.
                if '/lib/init/upstart-job' == content:
                    continue

                # Ignore symbolic links into the Debian alternatives system.
                # These are almost certainly managed by packages.
                if content.startswith('/etc/alternatives/'):
                    continue

                b.add_file(pathname,
                           content=content,
                           encoding='plain',
                           group=group,
                           mode=mode,
                           owner=owner)

            # A regular file is stored as plain text only if it is valid
            # UTF-8, which is required for JSON serialization.
            else:
                kwargs = dict(group=group,
                              mode=mode,
                              owner=owner)
                try:
                    if template:
                        if data:
                            kwargs['data'] = data.decode('utf_8')
                        kwargs['template'] = template.decode('utf_8')
                    else:
                        kwargs['content'] = content.decode('utf_8')
                    kwargs['encoding'] = 'plain'
                except UnicodeDecodeError:
                    # Not valid UTF-8 - fall back to base64 encoding.
                    if template:
                        if data:
                            kwargs['data'] = base64.b64encode(data)
                        kwargs['template'] = base64.b64encode(template)
                    else:
                        kwargs['content'] = base64.b64encode(content)
                    kwargs['encoding'] = 'base64'
                b.add_file(pathname, **kwargs)

            # If this file is a service init script or config, create a
            # service resource.
            try:
                manager, service = util.parse_service(pathname)
                if not r.ignore_service(manager, service):
                    b.add_service(manager, service)
                    b.add_service_package(manager,
                                          service,
                                          'apt',
                                          *_dpkg_query_S(pathname))
                    b.add_service_package(manager,
                                          service,
                                          'yum',
                                          *_rpm_qf(pathname))
            except ValueError:
                # `pathname` isn't a recognized service definition.
                pass
def _dpkg_query_S(pathname):
"""
Return a list of package names that contain `pathname` or `[]`. This
really can be a list thanks to `dpkg-divert`(1).
"""
# Cache the pathname-to-package mapping.
if not hasattr(_dpkg_query_S, '_cache'):
_dpkg_query_S._cache = defaultdict(set)
cache_ref = _dpkg_query_S._cache
for listname in glob.iglob('/var/lib/dpkg/info/*.list'):
package = os.path.splitext(os.path.basename(listname))[0]
for line in open(listname):
cache_ref[line.rstrip()].add(package)
# Return the list of packages that contain this file, if any.
if pathname in _dpkg_query_S._cache:
return list(_dpkg_query_S._cache[pathname])
# If `pathname` isn't in a package but is a symbolic link, see if the
# symbolic link is in a package. `postinst` programs commonly display
# this pattern.
try:
return _dpkg_query_S(os.readlink(pathname))
except OSError:
pass
return []
def _dpkg_md5sum(package, pathname):
"""
Find the MD5 sum of the packaged version of pathname or `None` if the
`pathname` does not come from a Debian package.
"""
# Cache any MD5 sums stored in the status file. These are typically
# conffiles and the like.
if not hasattr(_dpkg_md5sum, '_status_cache'):
_dpkg_md5sum._status_cache = {}
cache_ref = _dpkg_md5sum._status_cache
try:
pattern = re.compile(r'^ (\S+) ([0-9a-f]{32})')
for line in open('/var/lib/dpkg/status'):
match = pattern.match(line)
if not match:
continue
cache_ref[match.group(1)] = match.group(2)
except IOError:
pass
# Return this file's MD5 sum, if it can be found.
try:
return _dpkg_md5sum._status_cache[pathname]
except KeyError:
pass
# Cache the MD5 sums for files in this package.
if not hasattr(_dpkg_md5sum, '_cache'):
_dpkg_md5sum._cache = defaultdict(dict)
if package not in _dpkg_md5sum._cache:
cache_ref = _dpkg_md5sum._cache[package]
try:
for line in open('/var/lib/dpkg/info/{0}.md5sums'.format(package)):
md5sum, rel_pathname = line.split(None, 1)
cache_ref['/{0}'.format(rel_pathname.rstrip())] = md5sum
except IOError:
pass
# Return this file's MD5 sum, if it can be found.
try:
return _dpkg_md5sum._cache[package][pathname]
except KeyError:
pass
return None
def _rpm_qf(pathname):
"""
Return a list of package names that contain `pathname` or `[]`. RPM
might not actually support a single pathname being claimed by more
than one package but `dpkg` does so the interface is maintained.
"""
try:
p = subprocess.Popen(['rpm', '--qf=%{NAME}', '-qf', pathname],
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError:
return []
stdout, stderr = p.communicate()
if 0 != p.returncode:
return []
return [stdout]
def _rpm_md5sum(pathname):
    """
    Find the MD5 sum of the packaged version of pathname or `None` if the
    `pathname` does not come from an RPM.

    The entire RPM database is dumped and cached on the first call;
    symbolic links under `/etc` are resolved (even to targets outside
    `/etc`) so a link's cached checksum is that of its target.
    """
    if not hasattr(_rpm_md5sum, '_cache'):
        _rpm_md5sum._cache = {}
        symlinks = []
        try:
            p = subprocess.Popen(['rpm', '-qa', '--dump'],
                                 close_fds=True,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
            # `--dump` lines hold pathname, size, mtime, digest, mode and
            # more; groups capture pathname, digest, mode, link target.
            # (Implicit string concatenation below - hence "No ,".)
            pattern = re.compile(r'^(/etc/\S+) \d+ \d+ ([0-9a-f]+) ' # No ,
                                 '(0\d+) \S+ \S+ \d \d \d (\S+)$')
            for line in p.stdout:
                match = pattern.match(line)
                if match is None:
                    continue
                if '0120777' == match.group(3):
                    # Mode 0120777 marks a symbolic link - defer it until
                    # the whole dump has been read into the cache.
                    symlinks.append((match.group(1), match.group(4)))
                else:
                    _rpm_md5sum._cache[match.group(1)] = match.group(2)

            # Find the MD5 sum of the targets of any symbolic links, even
            # if the target is outside of /etc.
            pattern = re.compile(r'^(/\S+) \d+ \d+ ([0-9a-f]+) ' # No ,
                                 '(0\d+) \S+ \S+ \d \d \d (\S+)$')
            for pathname, target in symlinks:
                if '/' != target[0]:
                    # Resolve a relative link target against the link's
                    # own directory.
                    target = os.path.normpath(os.path.join(
                        os.path.dirname(pathname), target))
                if target in _rpm_md5sum._cache:
                    _rpm_md5sum._cache[pathname] = _rpm_md5sum._cache[target]
                else:
                    # Target wasn't in the /etc dump - query it directly.
                    p = subprocess.Popen(['rpm', '-qf', '--dump', target],
                                         close_fds=True,
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
                    for line in p.stdout:
                        match = pattern.match(line)
                        if match is not None and target == match.group(1):
                            _rpm_md5sum._cache[pathname] = match.group(2)
        except OSError:
            # `rpm` isn't installed; the cache stays empty.
            pass
    return _rpm_md5sum._cache.get(pathname, None)
def _unchanged(pathname, content, r):
    """
    Return `True` if a file is unchanged from its packaged version.

    `content` is the file's current contents; `r` is a rules object whose
    `ignore_file(pathname, True)` gets the final say even on a checksum
    match.
    """

    # Ignore files that are from the `base-files` package (which
    # doesn't include MD5 sums for every file for some reason).
    apt_packages = _dpkg_query_S(pathname)
    if 'base-files' in apt_packages:
        return True

    # Ignore files that are unchanged from their packaged version,
    # or match in MD5SUMS.  Copy the list out of MD5SUMS because the
    # original code extended the shared list stored in the module-level
    # dict, permanently growing it a little more on every call.
    md5sums = list(MD5SUMS.get(pathname, []))
    md5sums.extend([_dpkg_md5sum(package, pathname)
                    for package in apt_packages])
    md5sum = _rpm_md5sum(pathname)
    if md5sum is not None:
        md5sums.append(md5sum)

    # A 64-character entry is a SHA-256 sum rather than an MD5 sum, so
    # only bother hashing with SHA-256 when one is present.
    if (hashlib.md5(content).hexdigest() in md5sums \
        or 64 in [len(md5sum or '') for md5sum in md5sums] \
        and hashlib.sha256(content).hexdigest() in md5sums) \
        and r.ignore_file(pathname, True):
        return True

    return False
| 51.860082 | 79 | 0.607047 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31,084 | 0.616648 |