blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 2-616 | content_id stringlengths 40-40 | detected_licenses listlengths 0-69 | license_type stringclasses 2 values | repo_name stringlengths 5-118 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringlengths 4-63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k-686M ⌀ | star_events_count int64 0-209k | fork_events_count int64 0-110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 213 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2-10.3M | extension stringclasses 246 values | content stringlengths 2-10.3M | authors listlengths 1-1 | author_id stringlengths 0-212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6602d7be56388caf6e20e065c13acd98b8b65dcd
|
d175411dc1068dedc60d0497f8e8514555d58975
|
/preprocess.py
|
bbaa2e018ce6fba15fa437a9701ed10bed078683
|
[] |
no_license
|
CirQ/CIKMmining
|
1fd49118da0db90e97e301d341f8b3cb1ba8e1ef
|
a47c9cdce18eb9b8e662440fc8826c2e2783f275
|
refs/heads/master
| 2020-12-03T00:29:47.402238
| 2017-07-02T16:45:15
| 2017-07-02T16:45:15
| 90,642,933
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,084
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: CirQ
# Created Time: 2017-05-08 09:25:07
def get_row_in_dataset(dataset): # dataset can only be train, testA, testB
with open("/media/cirq/All matters/CIKM/%s.txt" % dataset, "r") as r:
while True:
line = r.readline().strip()
if not line:
break
row_id, label, radar_map = line.split(",")
del line
yield row_id, float(label), [float(z) for z in radar_map.split(" ")]
del row_id, label, radar_map
# a useless method checking for the correctness of data format
def validate_dataset(dataset):
with open("/media/cirq/All matters/CIKMmining/validate_%s.txt" % dataset, "w") as w:
for row_id, label, radar_map in get_row_in_dataset(dataset):
vlen = len(radar_map)
if vlen != 612060: # 612060 = 15 * 4 * 101 * 101
w.write("%s %.2f %d\n" % (row_id, label, vlen))
else:
w.write(row_id+"->is!\n")
print "validate ", row_id
import re
def generate_csv(method, dataset):
dim = re.match(r"^\D+(\d+)$", method).group(1)
with open("/media/cirq/All matters/CIKMmining/VECTOR_%s_%s.csv" % (method, dataset), "w") as w:
w.write('"row_id","label"' + ',"radar_map"'*int(dim) + '\n')
for row_id, label, radar_map in get_row_in_dataset(dataset):
w.write("\"%s\"," % row_id)
struct_map = [[radar_map[40804*t+10201*h:40804*t+10201*(h+1)] for h in range(4)] for t in range(15)]
vector = eval("%s(label, struct_map)" % method)
w.write(",".join([str("%.2f" % f) for f in vector]))
w.write("\n")
print "processing", row_id
if __name__ == "__main__":
from methods import *
method = "timesum_h_d99"
while True:
b = raw_input("want to generate data vector? (y/n) ")
if b == "y":
break
elif b == "n":
import sys
sys.exit(0)
generate_csv(method, "train")
generate_csv(method, "testB")
|
[
"cirq999@163.com"
] |
cirq999@163.com
|
521e571bd3a79c4c63b21c88324b5819c877109b
|
3e7b2ebb64e9e324ce47d19def21ae62cc1e56a6
|
/Problem-solving/UVA/10071 - Back to High School Physics.py
|
5a579f8a8411865d8f846b8806547b4904b98228
|
[] |
no_license
|
shuvo14051/python-data-algo
|
9b6622d9260e95ca9ffabd39b02996f13bdf20d1
|
8f66ff6f2bd88a0ae48dac72e4ea6c5382a836ec
|
refs/heads/master
| 2023-02-03T03:04:01.183093
| 2020-12-13T10:13:15
| 2020-12-13T10:13:15
| 274,106,480
| 0
| 0
| null | 2020-07-05T06:33:28
| 2020-06-22T10:24:05
|
Python
|
UTF-8
|
Python
| false
| false
| 90
|
py
|
try:
while True:
v,t = map(int,input().split())
print(2*v*t)
except EOFError:
pass
|
[
"shuvo1137017@gmail.com"
] |
shuvo1137017@gmail.com
|
91e8fcd5f83531ebe8d9ff42b9b393532a7f36f5
|
06b2cdd32aa7110b9fcf8090487f81c0fd112d07
|
/pyexe/procmail.py
|
c9e4dc4152256777b51577a4af20efed82d4b99b
|
[] |
no_license
|
tuian/winterpy
|
3050e7562ec0ed57052f5f36d970a4ecce093912
|
fb3da76d583890472207683e4919d7be146d8f91
|
refs/heads/master
| 2021-01-20T02:19:12.146785
| 2017-08-19T04:08:05
| 2017-08-19T04:08:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,146
|
py
|
#!/usr/bin/env python3
import sys
import re
import io
from email import header
from simplelex import Token, Lex
from mailutils import decode_multiline_header
reply = Token(r'R[Ee]:\s?|[回答][复覆][::]\s?', 're')
ottag = Token(r'\[OT\]\s?', 'ot', flags=re.I)
tag = Token(r'\[([\w._/-]+)[^]]*\]\s?', 'tag')
lex = Lex((reply, ottag, tag))
def reformat(s):
tokens, left = lex.parse(s)
if not tokens:
return
isre = False
tags = []
ot = False
usertag = []
for tok in tokens:
if tok.idtype == 're':
isre = True
elif tok.idtype == 'ot':
ot = True
elif tok.idtype == 'tag':
tag = tok.match.group(1)
tag_text = tag.lower()
if tag_text.endswith('lug') or tag_text == 'wine-zh' or not tags:
if tag_text not in tags:
tags.append(tok.match.group(1))
elif tag in tags:
continue
else:
usertag.append(tok.data)
else:
sys.exit('error: unknown idtype: %s' % tok.idtype)
if isre:
ret = 'Re: '
else:
ret = ''
if tags:
tags.sort()
ret += '[' + ']['.join(tags) + '] '
if ot:
ret += '[OT]'
ret += ''.join(usertag) + left
if ret != s:
return ret
def stripSeq(input):
subject = None
while True:
l = next(input)
if l.startswith('Subject: '):
# Subject appears
subject = l
continue
elif subject and l[0] in ' \t':
# Subject continues
subject += l
elif subject:
# Subject ends
s = subject[9:]
s = decode_multiline_header(s)
reformatted = reformat(s)
if not reformatted:
yield subject
else:
yield 'Subject: ' + header.Header(reformatted, 'utf-8').encode() + '\n'
subject = None
yield l
elif l.strip() == '':
yield l
# mail body
yield from input
else:
yield l
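# Aside, not part of the original script: the io.TextIOWrapper(..., errors='surrogateescape')
# wrappers in the __main__ block below exist so that mail which is not valid UTF-8
# still round-trips byte-for-byte through str. Minimal standard-library illustration:
_raw = b"Subject: caf\xe9\n"  # a latin-1 byte inside an otherwise ASCII header
_text = _raw.decode("utf-8", errors="surrogateescape")
assert _text.encode("utf-8", errors="surrogateescape") == _raw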
if __name__ == '__main__':
stdout = io.TextIOWrapper(sys.stdout.buffer,
encoding='utf-8', errors='surrogateescape')
stdin = io.TextIOWrapper(sys.stdin.buffer,
encoding='utf-8', errors='surrogateescape')
stdout.writelines(stripSeq(iter(stdin)))
|
[
"lilydjwg@gmail.com"
] |
lilydjwg@gmail.com
|
6285bec09869c96c550b642390092cab73c7434a
|
5db26e37400dcd5c95530465357be1acfdce01ac
|
/Python/Remove_Duplicates_from_Sorted_List.py
|
f80739e5078fe5d028f21e02fc7e1add8495d24a
|
[] |
no_license
|
lmx0412/LeetCodeInPython3
|
84e3036025538604ef2fc6b626543de6e87f4d82
|
456f83915ff59323891e5aaf33eb7feeb553dc9a
|
refs/heads/master
| 2022-03-06T06:26:49.812775
| 2022-02-16T08:11:00
| 2022-02-16T08:11:00
| 121,228,467
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,586
|
py
|
# pylint: disable-all
import unittest
"""
Description:
Given a sorted linked list, delete all duplicates such that each element appears only once.
Example 1:
Input: 1->1->2
Output: 1->2
Example 2:
Input: 1->1->2->3->3
Output: 1->2->3
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
l1 = ListNode(1)
l1.next = ListNode(1)
l1.next.next = ListNode(2)
l2 = ListNode(1)
l2.next = ListNode(1)
l2.next.next = ListNode(2)
l2.next.next.next = ListNode(3)
l2.next.next.next.next = ListNode(3)
class Solution:
def deleteDuplicates(self, head: ListNode) -> ListNode:
ret = ListNode(None)
tmp = ret
while head:
if head.val != tmp.val:
tmp.next = ListNode(head.val)
head = head.next
tmp = tmp.next
else:
head = head.next
return ret.next
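# Alternative sketch, not in the original file: the same deduplication can be done
# in place, without allocating new nodes, by splicing out equal neighbours.
def delete_duplicates_in_place(head: ListNode) -> ListNode:
    node = head
    while node and node.next:
        if node.next.val == node.val:
            node.next = node.next.next  # drop the duplicate node
        else:
            node = node.next
    return head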
class MyTest(unittest.TestCase):
def test_example1(self):
solution = Solution()
result = []
resultnode = solution.deleteDuplicates(l1)
while resultnode:
result.append(resultnode.val)
resultnode = resultnode.next
self.assertEqual(result, [1, 2])
def test_example2(self):
solution = Solution()
result = []
resultnode = solution.deleteDuplicates(l2)
while resultnode:
result.append(resultnode.val)
resultnode = resultnode.next
self.assertEqual(result, [1, 2, 3])
if __name__ == '__main__':
unittest.main()
|
[
"1024260510@qq.com"
] |
1024260510@qq.com
|
f408edbcd23e994916a76a8d9df92ad59a515c21
|
a26f34239feebb59a73cc1c5730bb350a8c4ce1f
|
/app/recipe/tests/test_recipes_api.py
|
3aa4dd6b17979bbc5c48a4b4d608dd2aa3d19162
|
[] |
no_license
|
LukaszMalucha/Django-REST
|
361024a5eaf46a1c425d71df3bb37339cc1ba5b4
|
5629889fcf9e072adeae18013dd11dd320a103dc
|
refs/heads/master
| 2021-08-22T10:39:11.666672
| 2020-05-15T12:49:03
| 2020-05-15T12:49:03
| 142,205,757
| 1
| 0
| null | 2021-06-10T21:09:29
| 2018-07-24T19:48:03
|
Python
|
UTF-8
|
Python
| false
| false
| 5,165
|
py
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Recipe, Tag, Ingredient
from recipe.serializers import RecipeSerializer, RecipeDetailSerializer
RECIPES_URL = reverse('recipe:recipe-list')
def detail_url(recipe_id):
"""Return recipe detail URL"""
return reverse('recipe:recipe-detail', args=[recipe_id])
def sample_tag(user, name='Main course'):
"""Create and return a sample tag"""
return Tag.objects.create(user=user, name=name)
def sample_ingredient(user, name='Cinnamon'):
"""Create and return a sample ingredient"""
return Ingredient.objects.create(user=user, name=name)
def sample_recipe(user, **params):
"""Create and return a sample recipe"""
defaults = {
'title': 'Sample recipe',
'time_minutes': 15,
'price': 2.00
}
defaults.update(params)
return Recipe.objects.create(user=user, **defaults)
class PublicRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
def test_auth_required(self):
"""Test that authentication is required"""
res = self.client.get(RECIPES_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateRecipeApiTests(TestCase):
"""Test unauthenticated recipe API access"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test123@gmail.com',
'test123'
)
self.client.force_authenticate(self.user)
def test_retrieve_recipes(self):
"""Test retrieving a list of recipes"""
sample_recipe(user=self.user)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.all().order_by('-id')
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_recipes_limited_to_user(self):
"""Test retrieving recipes for user"""
user2 = get_user_model().objects.create_user(
'tester2@gmail.com',
'test12345'
)
sample_recipe(user=user2)
sample_recipe(user=self.user)
res = self.client.get(RECIPES_URL)
recipes = Recipe.objects.filter(user=self.user)
serializer = RecipeSerializer(recipes, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data, serializer.data)
def test_view_recipe_detail(self):
"""Test viewing a recipe detail"""
recipe = sample_recipe(user=self.user)
recipe.tags.add(sample_tag(user=self.user))
recipe.ingredients.add(sample_ingredient(user=self.user))
url = detail_url(recipe.id)
res = self.client.get(url)
serializer = RecipeDetailSerializer(recipe)
self.assertEqual(res.data, serializer.data)
def test_creating_basic_recipe(self):
"""Test creating recipe"""
payload = {
'title': 'Chocolate Cheesecake',
'time_minutes': 30,
'price': 5.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
for key in payload.keys():
self.assertEqual(payload[key], getattr(recipe, key))
def test_create_recipe_with_tags(self):
"""Test creating a recipe with tags"""
tag1 = sample_tag(user=self.user, name='Vegan')
tag2 = sample_tag(user=self.user, name='Dessert')
payload = {
'title': 'Avocado lime cheesecake',
'tags' : [tag1.id, tag2.id],
'time_minutes' : 60,
'price': 20.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
tags = recipe.tags.all()
self.assertEqual(tags.count(), 2)
self.assertIn(tag1, tags)
self.assertIn(tag2, tags)
def test_create_recipe_with_ingredients(self):
"""Test creating recipe with ingredients"""
ingredient1 = sample_ingredient(user=self.user, name='Water')
ingredient2 = sample_ingredient(user=self.user, name='Ginger')
payload = {
'title': 'Ginger Water',
'ingredients': [ingredient1.id, ingredient2.id],
'time_minutes': 20,
'price': 7.00
}
res = self.client.post(RECIPES_URL, payload)
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
recipe = Recipe.objects.get(id=res.data['id'])
ingredients = recipe.ingredients.all()
self.assertEqual(ingredients.count(), 2)
        self.assertIn(ingredient1, ingredients)
        self.assertIn(ingredient2, ingredients)
|
[
"lucasmalucha@gmail.com"
] |
lucasmalucha@gmail.com
|
e71475b38193cc3d4ed1848a2d31f86844acdecd
|
99deab5f52fd7262a26de9aa5d0163bfa738590f
|
/python/leetcode/tree_bst/510_in_order_successor.py
|
2fd58586b6fd1affe3b38cc52b6e5128c005624f
|
[] |
no_license
|
zchen0211/topcoder
|
e47fc07c928b83138e27fd6681b373ce499480b0
|
4d73e4c1f2017828ff2d36058819988146356abe
|
refs/heads/master
| 2022-01-17T16:54:35.871026
| 2019-05-08T19:26:23
| 2019-05-13T05:19:46
| 84,052,683
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,694
|
py
|
"""
510. Inorder Successor in BST II (Medium)
Given a binary search tree and a node in it, find the in-order successor of that node in the BST.
The successor of a node p is the node with the smallest key greater than p.val.
You will have direct access to the node but not to the root of the tree. Each node will have a reference to its parent node.
Example 1:
Input:
root = {"$id":"1","left":{"$id":"2","left":null,"parent":{"$ref":"1"},"right":null,"val":1},"parent":null,"right":{"$id":"3","left":null,"parent":{"$ref":"1"},"right":null,"val":3},"val":2}
p = 1
Output: 2
Explanation: 1's in-order successor node is 2. Note that both p and the return value are of Node type.
Example 2:
Input:
root = {"$id":"1","left":{"$id":"2","left":{"$id":"3","left":{"$id":"4","left":null,"parent":{"$ref":"3"},"right":null,"val":1},"parent":{"$ref":"2"},"right":null,"val":2},"parent":{"$ref":"1"},"right":{"$id":"5","left":null,"parent":{"$ref":"2"},"right":null,"val":4},"val":3},"parent":null,"right":{"$id":"6","left":null,"parent":{"$ref":"1"},"right":null,"val":6},"val":5}
p = 6
Output: null
Explanation: There is no in-order successor of the current node, so the answer is null.
Example 3:
Input:
root = {"$id":"1","left":{"$id":"2","left":{"$id":"3","left":{"$id":"4","left":null,"parent":{"$ref":"3"},"right":null,"val":2},"parent":{"$ref":"2"},"right":{"$id":"5","left":null,"parent":{"$ref":"3"},"right":null,"val":4},"val":3},"parent":{"$ref":"1"},"right":{"$id":"6","left":null,"parent":{"$ref":"2"},"right":{"$id":"7","left":{"$id":"8","left":null,"parent":{"$ref":"7"},"right":null,"val":9},"parent":{"$ref":"6"},"right":null,"val":13},"val":7},"val":6},"parent":null,"right":{"$id":"9","left":{"$id":"10","left":null,"parent":{"$ref":"9"},"right":null,"val":17},"parent":{"$ref":"1"},"right":{"$id":"11","left":null,"parent":{"$ref":"9"},"right":null,"val":20},"val":18},"val":15}
p = 15
Output: 17
Example 4:
Input:
root = {"$id":"1","left":{"$id":"2","left":{"$id":"3","left":{"$id":"4","left":null,"parent":{"$ref":"3"},"right":null,"val":2},"parent":{"$ref":"2"},"right":{"$id":"5","left":null,"parent":{"$ref":"3"},"right":null,"val":4},"val":3},"parent":{"$ref":"1"},"right":{"$id":"6","left":null,"parent":{"$ref":"2"},"right":{"$id":"7","left":{"$id":"8","left":null,"parent":{"$ref":"7"},"right":null,"val":9},"parent":{"$ref":"6"},"right":null,"val":13},"val":7},"val":6},"parent":null,"right":{"$id":"9","left":{"$id":"10","left":null,"parent":{"$ref":"9"},"right":null,"val":17},"parent":{"$ref":"1"},"right":{"$id":"11","left":null,"parent":{"$ref":"9"},"right":null,"val":20},"val":18},"val":15}
p = 13
Output: 15
Note:
If the given node has no in-order successor in the tree, return null.
It's guaranteed that the values of the tree are unique.
Remember that we are using the Node type instead of TreeNode type so their string representations are different.
Follow up:
Could you solve it without looking up any of the node's values?
"""
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, left, right, parent):
self.val = val
self.left = left
self.right = right
self.parent = parent
"""
class Solution(object):
def inorderSuccessor(self, node):
"""
:type node: Node
:rtype: Node
"""
if node is None:
return None
if node.right is not None:
n = node.right
while n.left is not None:
n = n.left
return n
n = node
while n is not None:
if n.parent is not None and n.parent.left is n:
return n.parent
n = n.parent
return None
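# Illustrative check, not part of the original file: build Example 1 from the
# problem statement (root 2 with children 1 and 3) using a local Node class that
# matches the commented definition above, and confirm the successor of node 1.
class Node(object):
    def __init__(self, val, left=None, right=None, parent=None):
        self.val = val
        self.left = left
        self.right = right
        self.parent = parent

_root = Node(2)
_root.left = Node(1, parent=_root)
_root.right = Node(3, parent=_root)
assert Solution().inorderSuccessor(_root.left) is _root  # successor of 1 is 2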
|
[
"chenzhuoyuan07@gmail.com"
] |
chenzhuoyuan07@gmail.com
|
945982de41a158945afd90377f2fd321c325e05f
|
09c54360cd36134671f55079201e2ab6eb6f8de5
|
/src/python/pants/backend/shell/tailor.py
|
abc8d9fb02057b311dcda594e67e2fd92c74889f
|
[
"Apache-2.0"
] |
permissive
|
chebbyChefNEQ/pants
|
5de4fa9684103427b85ca632bf751506b5e16621
|
a53b9d29a160f36f9af1d1a2c43a693b6a55fa55
|
refs/heads/main
| 2023-08-15T12:07:52.846972
| 2021-09-25T16:04:09
| 2021-09-25T16:04:09
| 404,178,736
| 0
| 0
|
Apache-2.0
| 2021-09-08T01:57:22
| 2021-09-08T01:57:22
| null |
UTF-8
|
Python
| false
| false
| 2,545
|
py
|
# Copyright 2021 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import annotations
import os
from dataclasses import dataclass
from typing import Iterable
from pants.backend.shell.target_types import ShellLibrary, Shunit2Tests, Shunit2TestsSources
from pants.core.goals.tailor import (
AllOwnedSources,
PutativeTarget,
PutativeTargets,
PutativeTargetsRequest,
group_by_dir,
)
from pants.engine.fs import PathGlobs, Paths
from pants.engine.internals.selectors import Get
from pants.engine.rules import collect_rules, rule
from pants.engine.target import Target
from pants.engine.unions import UnionRule
from pants.source.filespec import Filespec, matches_filespec
from pants.util.logging import LogLevel
@dataclass(frozen=True)
class PutativeShellTargetsRequest(PutativeTargetsRequest):
pass
def classify_source_files(paths: Iterable[str]) -> dict[type[Target], set[str]]:
"""Returns a dict of target type -> files that belong to targets of that type."""
tests_filespec = Filespec(includes=list(Shunit2TestsSources.default))
test_filenames = set(
matches_filespec(tests_filespec, paths=[os.path.basename(path) for path in paths])
)
test_files = {path for path in paths if os.path.basename(path) in test_filenames}
library_files = set(paths) - test_files
return {Shunit2Tests: test_files, ShellLibrary: library_files}
@rule(level=LogLevel.DEBUG, desc="Determine candidate shell targets to create")
async def find_putative_targets(
req: PutativeShellTargetsRequest, all_owned_sources: AllOwnedSources
) -> PutativeTargets:
all_shell_files = await Get(Paths, PathGlobs, req.search_paths.path_globs("*.sh"))
unowned_shell_files = set(all_shell_files.files) - set(all_owned_sources)
classified_unowned_shell_files = classify_source_files(unowned_shell_files)
pts = []
for tgt_type, paths in classified_unowned_shell_files.items():
for dirname, filenames in group_by_dir(paths).items():
name = "tests" if tgt_type == Shunit2Tests else os.path.basename(dirname)
kwargs = {"name": name} if tgt_type == Shunit2Tests else {}
pts.append(
PutativeTarget.for_target_type(
tgt_type, dirname, name, sorted(filenames), kwargs=kwargs
)
)
return PutativeTargets(pts)
def rules():
return [*collect_rules(), UnionRule(PutativeTargetsRequest, PutativeShellTargetsRequest)]
|
[
"noreply@github.com"
] |
chebbyChefNEQ.noreply@github.com
|
bcd4c17ab5a06fe99f3c43abb1302429ee58b698
|
622820c4e962a48762848f2e04f0c5f5d6fe4e7e
|
/leukemia/gene_annotation_rna_processing.py
|
21d3a51df813b46d480c0008317ecc1efb47202b
|
[] |
no_license
|
lillianpetersen/activity-by-contact
|
e805ead4ab9aaf810d6d7ef2371a57dab978fcc3
|
6b85c31a3ce723a8d47cac753be5fe5d02b63625
|
refs/heads/master
| 2020-09-07T20:10:25.391202
| 2020-02-03T18:33:10
| 2020-02-03T18:33:10
| 220,900,954
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,072
|
py
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sys import exit
from scipy import stats
from collections import Counter
import statsmodels.stats.multitest as multi
from collections import defaultdict
import re
import gzip
wd = '/pbld/mcg/lillianpetersen/ABC/'
wdvars = '/pbld/mcg/lillianpetersen/ABC/saved_variables/'
wdfigs = '/pbld/mcg/lillianpetersen/ABC/figures/'
wddata = '/pbld/mcg/lillianpetersen/ABC/data/'
MakePlots = False
rnaGenes = np.load(wddata+'RNA/geneIDs.npy')
GTF_HEADER = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame']
R_SEMICOLON = re.compile(r'\s*;\s*')
R_COMMA = re.compile(r'\s*,\s*')
R_KEYVALUE = re.compile(r'(\s+|\s*=\s*)')
def dataframe(filename):
"""Open an optionally gzipped GTF file and return a pandas.DataFrame.
"""
# Each column is a list stored as a value in this dict.
result = defaultdict(list)
for i, line in enumerate(lines(filename)):
for key in line.keys():
# This key has not been seen yet, so set it to None for all
# previous lines.
if key not in result:
result[key] = [None] * i
# Ensure this row has some value for each column.
for key in result.keys():
result[key].append(line.get(key, None))
return pd.DataFrame(result)
def lines(filename):
"""Open an optionally gzipped GTF file and generate a dict for each line.
"""
fn_open = gzip.open if filename.endswith('.gz') else open
with fn_open(filename) as fh:
for line in fh:
if line.startswith('#'):
continue
else:
yield parse(line)
def parse(line):
"""Parse a single GTF line and return a dict.
"""
result = {}
fields = line.rstrip().split('\t')
for i, col in enumerate(GTF_HEADER):
result[col] = _get_value(fields[i])
# INFO field consists of "key1=value;key2=value;...".
infos = [x for x in re.split(R_SEMICOLON, fields[8]) if x.strip()]
for i, info in enumerate(infos, 1):
# It should be key="value".
try:
key, _, value = re.split(R_KEYVALUE, info, 1)
# But sometimes it is just "value".
except ValueError:
key = 'INFO{}'.format(i)
value = info
# Ignore the field if there is no value.
if value:
result[key] = _get_value(value)
return result
def _get_value(value):
if not value:
return None
# Strip double and single quotes.
value = value.strip('"\'')
# Return a list if the value has a comma.
if ',' in value:
value = re.split(R_COMMA, value)
# These values are equivalent to None.
elif value in ['', '.', 'NA']:
return None
return value
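# Illustrative check, not part of the original script: parse() on a single
# GENCODE-style GTF line exposes both the fixed columns and the INFO attributes.
_example_line = 'chr1\tHAVANA\tgene\t11869\t14409\t.\t+\t.\tgene_id "ENSG00000223972.5"; gene_type "transcribed_unprocessed_pseudogene"; gene_name "DDX11L1";\n'
_parsed = parse(_example_line)
assert _parsed['feature'] == 'gene'
assert _parsed['gene_name'] == 'DDX11L1'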
df = dataframe(wddata+'gencode.v25.annotation.gtf')
## Select only genes (not transcripts, other RNA features, etc.)
geneMask = df['feature']=='gene'
dfGenes = df[geneMask]
## Get only genes present in our data
GeneMask = np.isin(dfGenes['gene_id'],rnaGenes)
dfRNAgenes = dfGenes[GeneMask]
dfRNAgenes = dfRNAgenes.reset_index()
## Make sure all of the genes in our dataset are in this one
rnaGenesMask = np.isin(rnaGenes,dfRNAgenes['gene_id'])
np.save(wdvars+'validation_K562/inFileMask.npy',rnaGenesMask)
rnaGenes = rnaGenes[rnaGenesMask]
## Remove duplicate gene names
#names,counts = np.unique(dfRNAgenes['gene_name'],return_counts=True)
#badGenes = names[counts>1]
#bad = np.array(np.isin(dfRNAgenes['gene_name'],badGenes) ,dtype=bool)
#dfBad = dfRNAgenes[bad]
## Dictionary geneName --> geneType
idTypeDict = {}
for igene in range(len(dfRNAgenes)):
idTypeDict[ dfRNAgenes['gene_id'].iloc[igene] ] = dfRNAgenes['gene_type'].iloc[igene]
## Create arrays of geneName, Chromosome, start, and stop the same size as rnaGenes
## Limit to protein-coding genes
proteinCodingMask = np.ones(shape = (len(rnaGenes)),dtype=bool)
chrm = np.zeros(shape=len(rnaGenes),dtype=object)
start = np.zeros(shape=len(rnaGenes))
stop = np.zeros(shape=len(rnaGenes))
length = np.zeros(shape=len(rnaGenes))
geneName = np.zeros(shape=len(rnaGenes),dtype=object)
direction = np.zeros(shape=len(rnaGenes),dtype=object)
for igene in range(len(rnaGenes)):
name = rnaGenes[igene]
proteinCodingMask[igene] = idTypeDict[name]=='protein_coding'
dfCurrent = dfRNAgenes[dfRNAgenes['gene_id']==name]
geneName[igene] = np.array(dfCurrent['gene_name'])[0]
chrm[igene] = np.array(dfCurrent['seqname'])[0]
start[igene] = np.array(dfCurrent['start'])[0]
stop[igene] = np.array(dfCurrent['end'])[0]
length[igene] = stop[igene] - start[igene]
direction[igene] = np.array(dfCurrent['strand'])[0]
proteinCodingMask = np.array(1-proteinCodingMask,dtype=bool) # 1=bad, 0=good
np.save(wdvars+'RNA/proteinCodingMask.npy',proteinCodingMask)
np.save(wdvars+'RNA/geneName.npy',geneName)
np.save(wdvars+'RNA/geneChr.npy',chrm)
np.save(wdvars+'RNA/geneStart.npy',start)
np.save(wdvars+'RNA/geneStop.npy',stop)
np.save(wdvars+'RNA/geneLength.npy',length)
np.save(wdvars+'RNA/direction.npy',direction)
|
[
"lilliankay.petersen@gmail.com"
] |
lilliankay.petersen@gmail.com
|
cc1051b78d5c6c430cb982290b310e1b78a7d710
|
e39d4196fe0c896a42c1e7bb80fcdb6903e2d14c
|
/ch10/10_1.py
|
769cc07d0b15f218948792f26efc0587e81473ad
|
[] |
no_license
|
ZY1N/Pythonforinfomatics
|
eacb8a8d7feb6757155c3c0fde657bba6013a588
|
14f201171ee39aaa2d9171b5e77c345d15dfbdc4
|
refs/heads/master
| 2020-05-26T18:20:40.233616
| 2019-06-13T20:28:03
| 2019-06-13T20:28:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 856
|
py
|
#Exercise 10.1 Revise a previous program as follows: Read and parse the From
#lines and pull out the addresses from the line. Count the number of messages from
#each person using a dictionary.
#After all the data has been read print the person with the most commits by creating
#a list of (count, email) tuples from the dictionary and then sorting the list in reverse
#order and print out the person who has the most commits.
fname = raw_input("Enter a file name: ")
try:
hand = open(fname)
except:
print "file name is not valid"
exit()
dictionary = dict()
for line in hand:
if line.startswith("From "):
line = line.split()
dictionary [line[1]] = dictionary.get(line[1], 0) + 1
listoftuples = dictionary.items()
lstup = []
for x, y in listoftuples:
lstup.append((y, x))
lstup.sort(reverse=True)
print lstup[:1]
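# Aside, not part of the exercise solution: the tally-and-rank step above can also
# be written with collections.Counter; shown on an in-memory sample so it does not
# depend on the already-consumed file handle.
from collections import Counter
sample = ["From alice@example.com Sat Jan  5 09:14:16 2008",
          "From bob@example.com Sat Jan  5 10:30:00 2008",
          "From alice@example.com Sun Jan  6 11:00:00 2008"]
counts = Counter(words.split()[1] for words in sample if words.startswith("From "))
print counts.most_common(1)  # [('alice@example.com', 2)]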
|
[
"yinzhang@e1z2r2p3.42.us.org"
] |
yinzhang@e1z2r2p3.42.us.org
|
81e9c826c7c1eaef4d87398db1b57e8f407b20e2
|
a382716034b91d86ac7c8a548a63d236d6da8032
|
/iaso/migrations/0020_account_default_version.py
|
ffbd274b92d6be478aa8cddaeb589518644218e4
|
[
"MIT"
] |
permissive
|
lpontis/iaso
|
336221335fe33ca9e07e40feb676f57bbdc749ca
|
4d3a9d3faa6b3ed3a2e08c728cc4f03e5a0bbcb6
|
refs/heads/main
| 2023-08-12T20:34:10.823260
| 2021-10-04T07:34:50
| 2021-10-04T07:34:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 519
|
py
|
# Generated by Django 2.1.11 on 2019-11-28 11:45
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("iaso", "0019_auto_20191127_1602")]
operations = [
migrations.AddField(
model_name="account",
name="default_version",
field=models.ForeignKey(
blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to="iaso.SourceVersion"
),
)
]
|
[
"tech@bluesquarehub.com"
] |
tech@bluesquarehub.com
|
d5cfa50ece48fbb131a3a7fc27a6bddf6c237979
|
f806deeb28680f87e436b8c0910c0d40983d9c4a
|
/shop/wsgi.py
|
a7e1b4d5ee28d399903f73b8602cc79f83336d57
|
[] |
no_license
|
AzamatSydykov/pyshop_test
|
34e3bae7fa07485291007630544a65da832aaa40
|
9033ed1c4a74e27d744b86b89f07421f7deae7ef
|
refs/heads/master
| 2023-04-02T14:31:38.170029
| 2021-04-02T11:39:28
| 2021-04-02T11:39:28
| 353,988,652
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 517
|
py
|
"""
WSGI config for shop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from whitenoise import WhiteNoise
from django.core.wsgi import get_wsgi_application
from django.conf import settings
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'shop.settings')
application = get_wsgi_application()
application = WhiteNoise(application, root=settings.STATIC_ROOT)
|
[
"l0808las@gmail.com"
] |
l0808las@gmail.com
|
98b3426127bb99afab2b65254acaddd837d59b19
|
7dbe7a85f3b3ee3d163bb1864233a2cea084f1bd
|
/tests/conftest.py
|
22b005f628ee912287dc2ac0f0ad96a77dfca87b
|
[
"BSD-3-Clause"
] |
permissive
|
gnprice/rupypy
|
82ae3a6133e762db8ff11f62ecfafccf9718bf6a
|
b37b29f3f64b4fb976b75fcd5f2b3b7cf2525f94
|
refs/heads/master
| 2020-12-25T01:51:01.126815
| 2012-10-14T23:47:20
| 2012-10-14T23:47:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 596
|
py
|
import copy
def pytest_funcarg__space(request):
    # Inside the function so various initialization stuff isn't seen until
    # coverage is set up.
from rupypy.objspace import ObjectSpace
# Building a space is exceptionally expensive, so we create one once, and
# then just deepcopy it. Note that deepcopying is still fairly expensive
# (at the time of writing about 1/3 of total test time), but significantly
# less so than building a new space.
space = request.cached_setup(
setup=ObjectSpace,
scope="session",
)
return copy.deepcopy(space)
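# Aside, not part of the original conftest: pytest_funcarg__* and
# request.cached_setup are the legacy funcarg API. With modern pytest the same
# build-once-per-session, copy-per-test pattern would be a pair of fixtures
# roughly like this (hypothetical names, same ObjectSpace import):
#
#   @pytest.fixture(scope="session")
#   def _base_space():
#       from rupypy.objspace import ObjectSpace
#       return ObjectSpace()
#
#   @pytest.fixture
#   def space(_base_space):
#       return copy.deepcopy(_base_space)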
|
[
"alex.gaynor@gmail.com"
] |
alex.gaynor@gmail.com
|
ec379213d9e9990438292b83b965639bcdf794f1
|
3acb90a1e97a0e851c6e4b1b57dda78ec5e3e3b4
|
/problems/maximum_size_subset_with_sum.py
|
0796ed279279222b049ad34e3a8afb5180c153dd
|
[] |
no_license
|
jhyang12345/algorithm-problems
|
fea3c6498cff790fc4932404b5bbab08a6d4a627
|
704355013de9965ec596d2e0115fd2ca9828d0cb
|
refs/heads/master
| 2023-05-15T10:26:52.685471
| 2021-06-01T14:57:38
| 2021-06-01T14:57:38
| 269,333,379
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,399
|
py
|
# This is an extended version of the subset sum problem.
# Here we need to find the size of the maximum size subset
# whose sum is equal to the given sum.
from pprint import pprint
# def max_subset_with_sum(arr, s):
# n = len(arr)
# cache = [[0 for _ in range(n + 1)] for __ in range(n + 1)]
# ret = 0
# for x in range(1, n + 1):
# cur = arr[x - 1]
# for y in range(x):
# if cache[x - 1][y] <= s - cur:
# if cache[x - 1][y] != 0 or y == 0:
# cache[x][y + 1] = cache[x - 1][y] + cur
#
# else:
# cache[x][y] = max(cache[x - 1][y], cache[x]
# if cache[x][y + 1] == s:
# ret = max(ret, y + 1)
# pprint(cache)
# return ret
def max_subset_with_sum(arr, s):
    n = len(arr)
    # cache[i][j]: size of the largest subset of arr[:i] summing to j, or -1 if unreachable
    cache = [[-1 for _ in range(s + 1)] for __ in range(n + 1)]
    cache[0][0] = 0
    for i in range(1, n + 1):
        cur = arr[i - 1]
        for j in range(s + 1):
            # option 1: skip arr[i - 1]
            cache[i][j] = cache[i - 1][j]
            # option 2: take arr[i - 1] if some subset of arr[:i - 1] sums to j - cur
            if cur <= j and cache[i - 1][j - cur] != -1:
                cache[i][j] = max(cache[i][j], cache[i - 1][j - cur] + 1)
    return cache[n][s]
arr = [2, 3, 5, 7, 10, 15]
print(max_subset_with_sum(arr, 10))
arr = [1, 2, 3, 4, 5]
print(max_subset_with_sum(arr, 4))
|
[
"jhyang12345@naver.com"
] |
jhyang12345@naver.com
|
638a8e16c973c212a5a3f75aad9ae44281492433
|
c2d7bee139ac41ed31c664c98867345ca9537120
|
/kaggle_environments/envs/lux_ai_s2/test_agents/python/main.py
|
a7837dac8f789ddd28a409f0491811616795d2b4
|
[
"Apache-2.0"
] |
permissive
|
Kaggle/kaggle-environments
|
b184c0ffdadbf6a4277153e2f4cfc8700b90424b
|
e80a16e0c957237221167184df81035e9fc0d937
|
refs/heads/master
| 2023-09-01T02:50:19.091441
| 2023-08-31T18:31:25
| 2023-08-31T18:31:25
| 228,697,796
| 269
| 177
|
Apache-2.0
| 2023-08-31T18:19:38
| 2019-12-17T20:39:57
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 2,324
|
py
|
import json
from typing import Dict
import sys
from argparse import Namespace
if __package__ == "":
from agent import Agent
from lux.config import EnvConfig
from lux.kit import GameState, process_obs, to_json, from_json, process_action, obs_to_game_state
else:
from .agent import Agent
from .lux.config import EnvConfig
from .lux.kit import GameState, process_obs, to_json, from_json, process_action, obs_to_game_state
### DO NOT REMOVE THE FOLLOWING CODE ###
agent_dict = dict() # store potentially multiple dictionaries as kaggle imports code directly
agent_prev_obs = dict()
def agent_fn(observation, configurations):
"""
agent definition for kaggle submission.
"""
global agent_dict
step = observation.step
player = observation.player
remainingOverageTime = observation.remainingOverageTime
if step == 0:
env_cfg = EnvConfig.from_dict(configurations["env_cfg"])
agent_dict[player] = Agent(player, env_cfg)
agent_prev_obs[player] = dict()
    agent = agent_dict[player]
obs = process_obs(player, agent_prev_obs[player], step, json.loads(observation.obs))
if step == 100:
with open("obs.json", "w") as f:
json.dump(to_json(obs), f)
agent_prev_obs[player] = obs
agent.step = step
if obs["real_env_steps"] < 0:
actions = agent.early_setup(step, obs, remainingOverageTime)
else:
actions = agent.act(step, obs, remainingOverageTime)
return process_action(actions)
if __name__ == "__main__":
def read_input():
"""
Reads input from stdin
"""
try:
return input()
except EOFError as eof:
raise SystemExit(eof)
step = 0
player_id = 0
configurations = None
i = 0
while True:
inputs = read_input()
obs = json.loads(inputs)
observation = Namespace(**dict(step=obs["step"], obs=json.dumps(obs["obs"]), remainingOverageTime=obs["remainingOverageTime"], player=obs["player"], info=obs["info"]))
if i == 0:
configurations = obs["info"]["env_cfg"]
i += 1
actions = agent_fn(observation, dict(env_cfg=configurations))
# send actions to engine
print(json.dumps(actions))
|
[
"noreply@github.com"
] |
Kaggle.noreply@github.com
|
7ef6f8fb10b7db187aec1a53e9a1c743cc1f62ad
|
77db6591c5884204d6016bfa89b33691bac38813
|
/tamaya/migrations/0001_initial.py
|
30142f817f2e10784af4ac5fddf1f6f2ab75817d
|
[] |
no_license
|
jbukoski/iltf-signal-webmap-suite
|
4fc0aafa977e911a1071872f7adbaf2e7d0da37c
|
b8374e9cfcc80501a8f632721a7cb9b76e668f6b
|
refs/heads/master
| 2021-03-27T11:20:37.174667
| 2020-12-31T18:03:20
| 2020-12-31T18:03:20
| 79,853,039
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,544
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-07-12 07:30
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='boundary',
fields=[
('boundary_id', models.AutoField(primary_key=True, serialize=False)),
('id', models.IntegerField()),
('area', models.FloatField()),
('perimeter', models.FloatField()),
('acres', models.FloatField()),
('comments', models.CharField(max_length=80)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'Boundary',
},
),
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('docfile', models.FileField(upload_to='tamaya/uploaded')),
],
),
migrations.CreateModel(
name='mbls',
fields=[
('mbls_id', models.AutoField(primary_key=True, serialize=False)),
('area', models.FloatField()),
('perimeter', models.FloatField()),
('mbl_field', models.IntegerField()),
('mbl_id', models.IntegerField()),
('acres', models.FloatField()),
('comment', models.CharField(max_length=80)),
('geom', django.contrib.gis.db.models.fields.PolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'Master Business Leases',
},
),
migrations.CreateModel(
name='ndviDiff',
fields=[
('raster_id', models.TextField(primary_key=True, serialize=False)),
('name', models.TextField()),
('raster', django.contrib.gis.db.models.fields.RasterField(srid=4326)),
],
),
migrations.CreateModel(
name='roads',
fields=[
('roads_id', models.AutoField(primary_key=True, serialize=False)),
('distance', models.FloatField()),
('length', models.FloatField()),
('id_field', models.BigIntegerField()),
('access', models.CharField(max_length=254)),
('name', models.CharField(max_length=254)),
('number', models.CharField(max_length=254)),
('surface', models.CharField(max_length=254)),
('condition', models.CharField(max_length=254)),
('road_class', models.FloatField(max_length=254)),
('road_type', models.CharField(max_length=254)),
('sa_id', models.CharField(max_length=254)),
('surf_type', models.CharField(max_length=254)),
('status', models.CharField(max_length=254)),
('hunting', models.CharField(max_length=254)),
('comment', models.CharField(max_length=254)),
('restrict', models.CharField(max_length=254)),
('roadrepair', models.BigIntegerField()),
('geom', django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326)),
],
options={
'verbose_name_plural': 'Reservation Roads',
},
),
migrations.CreateModel(
name='soil_data',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('poly_id', models.BigIntegerField()),
('areasymbol', models.CharField(max_length=20)),
('spatialver', models.BigIntegerField()),
('musym', models.CharField(max_length=6)),
('mukey', models.CharField(max_length=30)),
('mukey_1', models.CharField(max_length=10)),
('tax_class', models.CharField(max_length=254)),
('org_matter', models.FloatField()),
('composting', models.CharField(max_length=254)),
('texture', models.CharField(max_length=254)),
('ph_water', models.FloatField()),
('bulk_densi', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'Soil Data',
},
),
migrations.CreateModel(
name='subwatersheds',
fields=[
('subwatershed_id', models.AutoField(primary_key=True, serialize=False)),
('id', models.BigIntegerField()),
('watershed', models.CharField(max_length=50)),
('subwatshed', models.IntegerField()),
('wsno', models.CharField(max_length=50)),
('acres', models.FloatField()),
('aveslope', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'Subwatersheds',
},
),
migrations.CreateModel(
name='surfacehydro',
fields=[
('surfacehydro_id', models.AutoField(primary_key=True, serialize=False)),
('id', models.BigIntegerField()),
('geom', django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326)),
],
options={
'verbose_name_plural': 'Surface hydrology',
},
),
migrations.CreateModel(
name='testRaster',
fields=[
('raster_id', models.TextField(default=23, primary_key=True, serialize=False)),
('name', models.TextField(default='tester')),
('raster', django.contrib.gis.db.models.fields.RasterField(srid=4326)),
],
),
migrations.CreateModel(
name='user_lines',
fields=[
('line_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('comment', models.CharField(max_length=100)),
('geom', django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326)),
],
options={
'verbose_name_plural': 'User-defined Lines',
},
),
migrations.CreateModel(
name='user_polygons',
fields=[
('polygon_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('comment', models.CharField(max_length=100)),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'User-defined Polygons',
},
),
migrations.CreateModel(
name='user_pts',
fields=[
('point_id', models.AutoField(primary_key=True, serialize=False)),
('name', models.CharField(max_length=30)),
('comment', models.CharField(max_length=100)),
('geom', django.contrib.gis.db.models.fields.MultiPointField(srid=4326)),
],
options={
'verbose_name_plural': 'User-defined Points',
},
),
migrations.CreateModel(
name='watersheds',
fields=[
('watershed_id', models.AutoField(primary_key=True, serialize=False)),
('objectid', models.BigIntegerField()),
('source', models.BigIntegerField()),
('huc_8', models.CharField(max_length=8)),
('hu_8_name', models.CharField(max_length=80)),
('shape_leng', models.FloatField()),
('shape_area', models.FloatField()),
('geom', django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326)),
],
options={
'verbose_name_plural': 'Watersheds',
},
),
]
|
[
"jacob.bukoski@yale.edu"
] |
jacob.bukoski@yale.edu
|
1e902a4337bd78acf19366639ed7099562d1b862
|
4c10b9340d37cbd33c50401c3b50cc5030ed0a68
|
/src/transformers/utils/doc.py
|
a5610a32ba6ba4e5340dcac3024e7823c8c3028a
|
[
"Apache-2.0"
] |
permissive
|
jmhessel/transformers
|
edd041e90f5ec80af591e160b134a86e50961b24
|
25c451e5a044969eb91e1e481574a2bfca5130ca
|
refs/heads/main
| 2023-01-06T21:14:23.193802
| 2022-11-14T21:32:50
| 2022-11-14T21:32:50
| 566,081,752
| 1
| 0
|
Apache-2.0
| 2022-11-14T23:34:57
| 2022-11-14T23:34:56
| null |
UTF-8
|
Python
| false
| false
| 39,118
|
py
|
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Doc utilities: Utilities related to documentation
"""
import functools
import re
import types
def add_start_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
return fn
return docstring_decorator
def add_start_docstrings_to_model_forward(*docstr):
def docstring_decorator(fn):
docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "")
class_name = f"[`{fn.__qualname__.split('.')[0]}`]"
intro = f" The {class_name} forward method, overrides the `__call__` special method."
note = r"""
<Tip>
Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`]
instance afterwards instead of this since the former takes care of running the pre and post processing steps while
the latter silently ignores them.
</Tip>
"""
fn.__doc__ = intro + note + docstring
return fn
return docstring_decorator
def add_end_docstrings(*docstr):
def docstring_decorator(fn):
fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr)
return fn
return docstring_decorator
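# Illustrative check, not part of the original module: the three decorators above
# only concatenate strings onto __doc__ (hypothetical example function below).
@add_start_docstrings("Shared intro. ")
def _docstring_demo():
    """Function-specific details."""


assert _docstring_demo.__doc__ == "Shared intro. Function-specific details."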
PT_RETURN_INTRODUCTION = r"""
Returns:
[`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of
`torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various
elements depending on the configuration ([`{config_class}`]) and inputs.
"""
TF_RETURN_INTRODUCTION = r"""
Returns:
[`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if
`return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the
configuration ([`{config_class}`]) and inputs.
"""
def _get_indent(t):
"""Returns the indentation in the first line of t"""
search = re.search(r"^(\s*)\S", t)
return "" if search is None else search.groups()[0]
def _convert_output_args_doc(output_args_doc):
"""Convert output_args_doc to display properly."""
# Split output_arg_doc in blocks argument/description
indent = _get_indent(output_args_doc)
blocks = []
current_block = ""
for line in output_args_doc.split("\n"):
# If the indent is the same as the beginning, the line is the name of new arg.
if _get_indent(line) == indent:
if len(current_block) > 0:
blocks.append(current_block[:-1])
current_block = f"{line}\n"
else:
# Otherwise it's part of the description of the current arg.
            # We need to remove 2 spaces from the indentation.
current_block += f"{line[2:]}\n"
blocks.append(current_block[:-1])
# Format each block for proper rendering
for i in range(len(blocks)):
blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i])
blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i])
return "\n".join(blocks)
def _prepare_output_docstrings(output_type, config_class, min_indent=None):
"""
Prepares the return part of the docstring using `output_type`.
"""
output_docstring = output_type.__doc__
# Remove the head of the docstring to keep the list of args only
lines = output_docstring.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None:
i += 1
if i < len(lines):
params_docstring = "\n".join(lines[(i + 1) :])
params_docstring = _convert_output_args_doc(params_docstring)
# Add the return introduction
full_output_type = f"{output_type.__module__}.{output_type.__name__}"
intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION
intro = intro.format(full_output_type=full_output_type, config_class=config_class)
result = intro + params_docstring
# Apply minimum indent if necessary
if min_indent is not None:
lines = result.split("\n")
# Find the indent of the first nonempty line
i = 0
while len(lines[i]) == 0:
i += 1
indent = len(_get_indent(lines[i]))
# If too small, add indentation to all nonempty lines
if indent < min_indent:
to_add = " " * (min_indent - indent)
lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines]
result = "\n".join(lines)
return result
PT_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer(
... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt"
... )
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_token_class_ids = logits.argmax(-1)
    >>> # Note that tokens are classified rather than input words which means that
>>> # there might be more predicted token classes than words.
>>> # Multiple token classes might account for the same word
>>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]]
>>> predicted_tokens_classes
{expected_output}
```
```python
>>> labels = predicted_token_class_ids
>>> loss = model(**inputs, labels=labels).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> answer_start_index = outputs.start_logits.argmax()
>>> answer_end_index = outputs.end_logits.argmax()
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
>>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True)
{expected_output}
```
```python
>>> # target is "nice puppet"
>>> target_start_index = torch.tensor([{qa_target_start_index}])
>>> target_end_index = torch.tensor([{qa_target_end_index}])
>>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
>>> loss = outputs.loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example of single-label classification:
```python
>>> import torch
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax().item()
>>> model.config.id2label[predicted_class_id]
{expected_output}
```
```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)
>>> labels = torch.tensor([1])
>>> loss = model(**inputs, labels=labels).loss
>>> round(loss.item(), 2)
{expected_loss}
```
Example of multi-label classification:
```python
>>> import torch
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_id = logits.argmax().item()
>>> model.config.id2label[predicted_class_id]
{expected_output}
```
```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = {model_class}.from_pretrained(
... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification"
... )
>>> labels = torch.nn.functional.one_hot(torch.tensor([predicted_class_id]), num_classes=num_labels).to(
... torch.float
... )
>>> loss = model(**inputs, labels=labels).loss
>>> loss.backward() # doctest: +IGNORE_RESULT
```
"""
PT_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # retrieve index of {mask}
>>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
>>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1)
>>> tokenizer.decode(predicted_token_id)
{expected_output}
```
```python
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"]
>>> # mask labels of non-{mask} tokens
>>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
>>> outputs = model(**inputs, labels=labels)
>>> round(outputs.loss.item(), 2)
{expected_loss}
```
"""
PT_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
PT_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True)
>>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> import torch
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
>>> outputs = model(**inputs, labels=inputs["input_ids"])
>>> loss = outputs.loss
>>> logits = outputs.logits
```
"""
PT_SPEECH_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
PT_SPEECH_CTC_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_ids = torch.argmax(logits, dim=-1)
>>> # transcribe speech
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription[0]
{expected_output}
```
```python
>>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids
>>> # compute loss
>>> loss = model(**inputs).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SPEECH_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> predicted_class_ids = torch.argmax(logits, dim=-1).item()
>>> predicted_label = model.config.id2label[predicted_class_ids]
>>> predicted_label
{expected_output}
```
```python
>>> # compute loss - target_label is e.g. "down"
>>> target_label = model.config.id2label[0]
>>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]])
>>> loss = model(**inputs).loss
>>> round(loss.item(), 2)
{expected_loss}
```
"""
PT_SPEECH_FRAME_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate)
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> probabilities = torch.sigmoid(logits[0])
>>> # labels is a one-hot array of shape (num_frames, num_speakers)
>>> labels = (probabilities > 0.5).long()
>>> labels[0].tolist()
{expected_output}
```
"""
PT_SPEECH_XVECTOR_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import torch
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = feature_extractor(
... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True
... )
>>> with torch.no_grad():
... embeddings = model(**inputs).embeddings
>>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu()
>>> # the resulting embeddings can be used for cosine similarity-based retrieval
>>> cosine_sim = torch.nn.CosineSimilarity(dim=-1)
>>> similarity = cosine_sim(embeddings[0], embeddings[1])
>>> threshold = 0.7 # the optimal threshold is dataset-dependent
>>> if similarity < threshold:
... print("Speakers are not the same!")
>>> round(similarity.item(), 2)
{expected_output}
```
"""
PT_VISION_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = feature_extractor(image, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
PT_VISION_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import torch
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = feature_extractor(image, return_tensors="pt")
>>> with torch.no_grad():
... logits = model(**inputs).logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_label = logits.argmax(-1).item()
>>> print(model.config.id2label[predicted_label])
{expected_output}
```
"""
PT_SAMPLE_DOCSTRINGS = {
"SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": PT_MASKED_LM_SAMPLE,
"LMHead": PT_CAUSAL_LM_SAMPLE,
"BaseModel": PT_BASE_MODEL_SAMPLE,
"SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE,
"CTC": PT_SPEECH_CTC_SAMPLE,
"AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE,
"AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE,
"AudioXVector": PT_SPEECH_XVECTOR_SAMPLE,
"VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE,
"ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE,
}
TF_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer(
... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf"
... )
>>> logits = model(**inputs).logits
>>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1)
    >>> # Note that tokens are classified rather than input words which means that
>>> # there might be more predicted token classes than words.
>>> # Multiple token classes might account for the same word
>>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()]
>>> predicted_tokens_classes
{expected_output}
```
```python
>>> labels = predicted_token_class_ids
>>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss)
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="tf")
>>> outputs = model(**inputs)
>>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
>>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
>>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
>>> tokenizer.decode(predict_answer_tokens)
{expected_output}
```
```python
>>> # target is "nice puppet"
>>> target_start_index = tf.constant([{qa_target_start_index}])
>>> target_end_index = tf.constant([{qa_target_end_index}])
>>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
>>> loss = tf.math.reduce_mean(outputs.loss)
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> logits = model(**inputs).logits
>>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0])
>>> model.config.id2label[predicted_class_id]
{expected_output}
```
```python
>>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)`
>>> num_labels = len(model.config.id2label)
>>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels)
>>> labels = tf.constant(1)
>>> loss = model(**inputs, labels=labels).loss
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf")
>>> logits = model(**inputs).logits
>>> # retrieve index of {mask}
>>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0])
>>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index)
>>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1)
>>> tokenizer.decode(predicted_token_id)
{expected_output}
```
```python
>>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"]
>>> # mask labels of non-{mask} tokens
>>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100)
>>> outputs = model(**inputs, labels=labels)
>>> round(float(outputs.loss), 2)
{expected_loss}
```
"""
TF_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
TF_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True)
>>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}}
>>> outputs = model(inputs) # batch size is 1
>>> # the linear classifier still needs to be trained
>>> logits = outputs.logits
```
"""
TF_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf")
>>> outputs = model(inputs)
>>> logits = outputs.logits
```
"""
TF_SPEECH_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
TF_SPEECH_CTC_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> import tensorflow as tf
>>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation")
>>> dataset = dataset.sort("id")
>>> sampling_rate = dataset.features["audio"].sampling_rate
>>> processor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> # audio file is decoded on the fly
>>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf")
>>> logits = model(**inputs).logits
>>> predicted_ids = tf.math.argmax(logits, axis=-1)
>>> # transcribe speech
>>> transcription = processor.batch_decode(predicted_ids)
>>> transcription[0]
{expected_output}
```
```python
>>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids
>>> # compute loss
>>> loss = model(**inputs).loss
>>> round(float(loss), 2)
{expected_loss}
```
"""
TF_VISION_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = feature_extractor(image, return_tensors="tf")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
>>> list(last_hidden_states.shape)
{expected_output}
```
"""
TF_VISION_SEQ_CLASS_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> import tensorflow as tf
>>> from datasets import load_dataset
>>> dataset = load_dataset("huggingface/cats-image")
>>> image = dataset["test"]["image"][0]
>>> feature_extractor = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = feature_extractor(image, return_tensors="tf")
>>> logits = model(**inputs).logits
>>> # model predicts one of the 1000 ImageNet classes
>>> predicted_label = int(tf.math.argmax(logits, axis=-1))
>>> print(model.config.id2label[predicted_label])
{expected_output}
```
"""
TF_SAMPLE_DOCSTRINGS = {
"SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": TF_MASKED_LM_SAMPLE,
"LMHead": TF_CAUSAL_LM_SAMPLE,
"BaseModel": TF_BASE_MODEL_SAMPLE,
"SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE,
"CTC": TF_SPEECH_CTC_SAMPLE,
"VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE,
"ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE,
}
FLAX_TOKEN_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_QUESTION_ANSWERING_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet"
>>> inputs = tokenizer(question, text, return_tensors="jax")
>>> outputs = model(**inputs)
>>> start_scores = outputs.start_logits
>>> end_scores = outputs.end_logits
```
"""
FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_MASKED_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
```
"""
FLAX_BASE_MODEL_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax")
>>> outputs = model(**inputs)
>>> last_hidden_states = outputs.last_hidden_state
```
"""
FLAX_MULTIPLE_CHOICE_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> choice0 = "It is eaten with a fork and a knife."
>>> choice1 = "It is eaten while held in the hand."
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True)
>>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}})
>>> logits = outputs.logits
```
"""
FLAX_CAUSAL_LM_SAMPLE = r"""
Example:
```python
>>> from transformers import {processor_class}, {model_class}
>>> tokenizer = {processor_class}.from_pretrained("{checkpoint}")
>>> model = {model_class}.from_pretrained("{checkpoint}")
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np")
>>> outputs = model(**inputs)
    >>> # retrieve logits for next token
>>> next_token_logits = outputs.logits[:, -1]
```
"""
FLAX_SAMPLE_DOCSTRINGS = {
"SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE,
"QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE,
"TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE,
"MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE,
"MaskedLM": FLAX_MASKED_LM_SAMPLE,
"BaseModel": FLAX_BASE_MODEL_SAMPLE,
"LMHead": FLAX_CAUSAL_LM_SAMPLE,
}
def add_code_sample_docstrings(
*docstr,
processor_class=None,
checkpoint=None,
output_type=None,
config_class=None,
mask="[MASK]",
qa_target_start_index=14,
qa_target_end_index=15,
model_cls=None,
modality=None,
expected_output="",
expected_loss="",
):
def docstring_decorator(fn):
# model_class defaults to function's class if not specified otherwise
model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls
if model_class[:2] == "TF":
sample_docstrings = TF_SAMPLE_DOCSTRINGS
elif model_class[:4] == "Flax":
sample_docstrings = FLAX_SAMPLE_DOCSTRINGS
else:
sample_docstrings = PT_SAMPLE_DOCSTRINGS
# putting all kwargs for docstrings in a dict to be used
        # with the `.format(**doc_kwargs)`. Note that the string might
        # be formatted with non-existing keys, which is fine.
doc_kwargs = dict(
model_class=model_class,
processor_class=processor_class,
checkpoint=checkpoint,
mask=mask,
qa_target_start_index=qa_target_start_index,
qa_target_end_index=qa_target_end_index,
expected_output=expected_output,
expected_loss=expected_loss,
)
if "SequenceClassification" in model_class and modality == "audio":
code_sample = sample_docstrings["AudioClassification"]
elif "SequenceClassification" in model_class:
code_sample = sample_docstrings["SequenceClassification"]
elif "QuestionAnswering" in model_class:
code_sample = sample_docstrings["QuestionAnswering"]
elif "TokenClassification" in model_class:
code_sample = sample_docstrings["TokenClassification"]
elif "MultipleChoice" in model_class:
code_sample = sample_docstrings["MultipleChoice"]
elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]:
code_sample = sample_docstrings["MaskedLM"]
elif "LMHead" in model_class or "CausalLM" in model_class:
code_sample = sample_docstrings["LMHead"]
elif "CTC" in model_class:
code_sample = sample_docstrings["CTC"]
elif "AudioFrameClassification" in model_class:
code_sample = sample_docstrings["AudioFrameClassification"]
elif "XVector" in model_class and modality == "audio":
code_sample = sample_docstrings["AudioXVector"]
elif "Model" in model_class and modality == "audio":
code_sample = sample_docstrings["SpeechBaseModel"]
elif "Model" in model_class and modality == "vision":
code_sample = sample_docstrings["VisionBaseModel"]
elif "Model" in model_class or "Encoder" in model_class:
code_sample = sample_docstrings["BaseModel"]
elif "ImageClassification" in model_class:
code_sample = sample_docstrings["ImageClassification"]
else:
raise ValueError(f"Docstring can't be built for model {model_class}")
func_doc = (fn.__doc__ or "") + "".join(docstr)
output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class)
built_doc = code_sample.format(**doc_kwargs)
fn.__doc__ = func_doc + output_doc + built_doc
return fn
return docstring_decorator
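# Purely illustrative (not part of the original module): applying the decorator to a
# toy class shows how the dispatch above behaves. The class, processor and checkpoint
# names are made up. Because the qualified name contains "Model" and does not start
# with "TF" or "Flax", the PyTorch "BaseModel" sample is formatted with the kwargs
# below and appended to the method's docstring.
class _DocExampleModel:
    @add_code_sample_docstrings(
        processor_class="AutoTokenizer",
        checkpoint="hypothetical/checkpoint",
    )
    def forward(self):
        """Toy forward pass used only to demonstrate the generated docstring."""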
def replace_return_docstrings(output_type=None, config_class=None):
def docstring_decorator(fn):
func_doc = fn.__doc__
lines = func_doc.split("\n")
i = 0
while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None:
i += 1
if i < len(lines):
indent = len(_get_indent(lines[i]))
lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent)
func_doc = "\n".join(lines)
else:
raise ValueError(
f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, "
f"current docstring is:\n{func_doc}"
)
fn.__doc__ = func_doc
return fn
return docstring_decorator
def copy_func(f):
"""Returns a copy of a function f."""
# Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
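# Small check added for illustration: the copy keeps the docstring, the keyword-only
# defaults copied via `__kwdefaults__`, and the metadata set by `update_wrapper`,
# while being a distinct function object.
def _copy_func_demo(name, *, punctuation="!"):
    """Toy function used only to demonstrate copy_func."""
    return f"Hello {name}{punctuation}"

_copy_func_demo_copy = copy_func(_copy_func_demo)
assert _copy_func_demo_copy("world") == "Hello world!"
assert _copy_func_demo_copy is not _copy_func_demo
assert _copy_func_demo_copy.__doc__ == _copy_func_demo.__doc__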
|
[
"noreply@github.com"
] |
jmhessel.noreply@github.com
|
bfe95bf2133abbcf730d8bfcde647cd39a0b0566
|
6ede75099fc38a682e030d70051389ea182d6cc2
|
/実装/クライアント/sidi2/incomeexpensesapi/menu/migrations/0030_auto_20201220_1600.py
|
ac289e2b32745ce412c3f49524bf52b13b178444
|
[] |
no_license
|
hangtran93tk/team06
|
0d86e59be866d7f6bda1b6c81f725ca1f80eba0f
|
89000be20c18d3b9610c240b25c7c1944fc68d6d
|
refs/heads/master
| 2023-03-12T11:58:03.802711
| 2021-02-26T03:51:36
| 2021-02-26T03:51:36
| 279,473,813
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 612
|
py
|
# Generated by Django 3.0.5 on 2020-12-20 07:00
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('menu', '0029_auto_20201220_1558'),
]
operations = [
migrations.AlterField(
model_name='menuinfo',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, related_name='infouser', to=settings.AUTH_USER_MODEL),
),
]
|
[
"hangtrand6qt1@gmail.com"
] |
hangtrand6qt1@gmail.com
|
e4ffd15029d81b16c7e5358ed60f6b17ff8e7e28
|
8db839ce40bb09b3634201ffbf3dae5342457d95
|
/DictProj1.py
|
fec142ba107a6466b6094dc01013e5269f332a3e
|
[] |
no_license
|
Liam876/HomeWork-Dina
|
e2803cba6de2fb9e8103837373805eef4f3dda28
|
e40596c14244cf8a608cbfc1715d5ad7db44fd61
|
refs/heads/master
| 2023-01-13T00:45:40.108337
| 2020-11-06T13:19:38
| 2020-11-06T13:19:38
| 212,346,549
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,828
|
py
|
def createdic(t):  # t is the number of key/value pairs to read in; returns the formed dict
    dictionary = {}
for rep in range(t):
a = rep + 1
print(str(a) + " time")
key = getinput("key")
value = getinput("value")
if key in dictionary:
output = []
li = [dictionary[key]]
li.append(value)
li = reemovNestings(li, output)
dictionary.update({key: li})
else:
dictionary[key] = value
print("The dictionary given is: ")
print(dictionary)
return dictionary
def getinput(thing):
inp = input("insert " + str(thing))
try:
inp = int(inp)
except ValueError:
inp = str(inp)
return inp
def checkey(key, dictionary):
if key in dictionary:
print("yes")
else:
print("no")
def sqrdic():
li_nums = []
dictionary = {}
while True:
num = input("insert numbers to be keys of dict. When finished type a none-whole-number")
try:
gnum = int(num)
except:
break
li_nums.append(gnum)
for num in li_nums:
if num in dictionary:
output = []
li = [dictionary[num]]
li.append(num ** 2)
li = reemovNestings(li, output)
dictionary.update({num: li})
else:
dictionary[num] = num ** 2
print("The dictionary created is:")
print(dictionary)
def reemovNestings(l, output):
for i in l:
if type(i) == list:
reemovNestings(i, output)
else:
output.append(i)
return output
def main():
dict1 = createdic(3)
dict2 = sqrdic()
checkey("one", dict1)
if __name__ == "__main__":
main()
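# Worked example (added for illustration): reemovNestings flattens arbitrarily nested
# lists, e.g. reemovNestings([1, [2, [3, 4]], 5], []) returns [1, 2, 3, 4, 5].
# createdic and sqrdic use it to merge values when the same key is entered twice.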
|
[
"noreply@github.com"
] |
Liam876.noreply@github.com
|
8fd2eb0d7376aa61d889e25e31931f69fc97eb78
|
c0129f53d99fbcdd690b54ba64a8fe1a54432a99
|
/data_process/py/extraction.py
|
c96b5910470abde3fdf074f40377e95b7262ee1a
|
[] |
no_license
|
zhoumaneo/LCZ_classification-based-deep-learning-
|
ea34d9e3062ecd119613a336dc1f03e898429f6f
|
5d6ea2fbab1cc13bfdec98434e243b95c4a51c53
|
refs/heads/master
| 2020-06-18T14:16:40.852682
| 2019-08-28T03:59:16
| 2019-08-28T03:59:16
| 196,329,811
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 452
|
py
|
import arcpy
import os
path = os.getcwd()
print(path)
arcpy.CheckOutExtension("spatial")
arcpy.gp.overwriteOutput=1
arcpy.env.workspace = "vectors"
raster = "rs_img\\site2_remote_img_clip.tif"
masks= arcpy.ListFeatureClasses("*","polygon")
for mask in masks:
print(mask)
out = "rs_img\\"+"site2_"+mask.split('.')[0]+".tif"
arcpy.gp.ExtractByMask_sa(raster, mask, out)
print("site2_" + mask.split('.')[0] + " has done")
|
[
"15638828135@163.com"
] |
15638828135@163.com
|
1abd27f0ea85df15651b05977861eaf8c8516a52
|
eeacfabfb918c9b0f922a4f6a96e50e63f029fad
|
/001.py
|
2395e2a91db46145085b45f4a13edd2d54a9abe1
|
[] |
no_license
|
lch743/Python
|
f36af505f24cd88ab9900354d14f6a62f71f108c
|
c5bf64def9703842eefab2423347d16a9ae4478d
|
refs/heads/master
| 2021-01-20T15:44:20.024352
| 2012-12-11T08:07:16
| 2012-12-11T08:07:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 126
|
py
|
import string
table=string.maketrans('abcdefghijklmnopqrstuvwxyz','cdefghijklmnopqrstuvwxyzab')
print 'map'.translate(table)
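# The table above shifts each lowercase letter forward by two (a->c, b->d, ...),
# so the print statement outputs 'ocr'.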
|
[
"lch743@gmail.com"
] |
lch743@gmail.com
|
49967edba567be68611b0031fc718b09de9692db
|
e26104b7d6cc8042b64715cfb6c82e6ef393f439
|
/venv/lib/python3.7/site-packages/pyexcel_ods-0.5.6-py3.7.egg/pyexcel_ods/odsr.py
|
9aa3cee734f50dcfb60d114292874c36944d5215
|
[] |
no_license
|
kartikeyj96/python-basics
|
2a08289600d25c79f7835c7c147e7dbd304934b2
|
c56806580d8a67e38230fefe2a148eb2d89df158
|
refs/heads/master
| 2020-09-15T12:10:53.823270
| 2019-11-22T21:51:59
| 2019-11-22T21:51:59
| 223,440,044
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,422
|
py
|
"""
pyexcel_ods.odsr
~~~~~~~~~~~~~~~~~~~~~
ods reader
:copyright: (c) 2014-2017 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
# Copyright 2011 Marco Conti
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from odf.text import P
from odf.table import Table, TableRow, TableCell
# Thanks to grt for the fixes
from odf.teletype import extractText
from odf.namespaces import OFFICENS
from odf.opendocument import load
import pyexcel_io.service as service
from pyexcel_io.book import BookReader
from pyexcel_io.sheet import SheetReader
from pyexcel_io._compact import OrderedDict
class ODSSheet(SheetReader):
"""native ods sheet"""
def __init__(self, sheet, auto_detect_int=True, **keywords):
SheetReader.__init__(self, sheet, **keywords)
self.__auto_detect_int = auto_detect_int
@property
def name(self):
return self._native_sheet.getAttribute("name")
def row_iterator(self):
return self._native_sheet.getElementsByType(TableRow)
def column_iterator(self, row):
cells = row.getElementsByType(TableCell)
for cell in cells:
repeat = cell.getAttribute("numbercolumnsrepeated")
cell_value = self.__read_cell(cell)
if repeat:
number_of_repeat = int(repeat)
for i in range(number_of_repeat):
yield cell_value
else:
yield cell_value
def __read_cell(self, cell):
cell_type = cell.getAttrNS(OFFICENS, "value-type")
value_token = service.VALUE_TOKEN.get(cell_type, "value")
ret = None
if cell_type == "string":
text_content = self.__read_text_cell(cell)
ret = text_content
elif cell_type == "currency":
value = cell.getAttrNS(OFFICENS, value_token)
currency = cell.getAttrNS(OFFICENS, cell_type)
if currency:
ret = value + " " + currency
else:
ret = value
else:
if cell_type in service.VALUE_CONVERTERS:
value = cell.getAttrNS(OFFICENS, value_token)
n_value = service.VALUE_CONVERTERS[cell_type](value)
if cell_type == "float" and self.__auto_detect_int:
if service.has_no_digits_in_float(n_value):
n_value = int(n_value)
ret = n_value
else:
text_content = self.__read_text_cell(cell)
ret = text_content
return ret
def __read_text_cell(self, cell):
text_content = []
paragraphs = cell.getElementsByType(P)
# for each text node
for paragraph in paragraphs:
name_space, tag = paragraph.parentNode.qname
if tag != str("annotation"):
data = extractText(paragraph)
text_content.append(data)
return "\n".join(text_content)
class ODSBook(BookReader):
"""read ods book"""
def open(self, file_name, **keywords):
"""open ods file"""
BookReader.open(self, file_name, **keywords)
self._load_from_file()
def open_stream(self, file_stream, **keywords):
"""open ods file stream"""
BookReader.open_stream(self, file_stream, **keywords)
self._load_from_memory()
def read_sheet_by_name(self, sheet_name):
"""read a named sheet"""
tables = self._native_book.spreadsheet.getElementsByType(Table)
rets = [
table
for table in tables
if table.getAttribute("name") == sheet_name
]
if len(rets) == 0:
raise ValueError("%s cannot be found" % sheet_name)
else:
return self.read_sheet(rets[0])
def read_sheet_by_index(self, sheet_index):
"""read a sheet at a specified index"""
tables = self._native_book.spreadsheet.getElementsByType(Table)
length = len(tables)
if sheet_index < length:
return self.read_sheet(tables[sheet_index])
else:
raise IndexError(
"Index %d of out bound %d" % (sheet_index, length)
)
def read_all(self):
"""read all sheets"""
result = OrderedDict()
for sheet in self._native_book.spreadsheet.getElementsByType(Table):
ods_sheet = ODSSheet(sheet, **self._keywords)
result[ods_sheet.name] = ods_sheet.to_array()
return result
def read_sheet(self, native_sheet):
"""read one native sheet"""
sheet = ODSSheet(native_sheet, **self._keywords)
return {sheet.name: sheet.to_array()}
def close(self):
self._native_book = None
def _load_from_memory(self):
self._native_book = load(self._file_stream)
def _load_from_file(self):
self._native_book = load(self._file_name)
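if __name__ == "__main__":
    # Minimal usage sketch added for illustration; "example.ods" is a placeholder
    # path. pyexcel-io normally instantiates these readers, but they also work
    # stand-alone:
    book = ODSBook()
    book.open("example.ods")
    print(book.read_all())  # OrderedDict mapping sheet names to lists of rows
    book.close()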
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
5d1f7611d21754e12f876d8d2824ff8575e53efb
|
4215ff4aa3383f2145c416a9f5a0501f70445555
|
/xxxy/Day_two/multi_process/process_cuiqinghua_crawl_mysql.py
|
bf8f433bb7a2316c412f8131948310029c7addc6
|
[] |
no_license
|
spareribs/scrapy-examples
|
abb93ce217b87af683d222e01de4de559e5288fd
|
ecb41e28bdfb0814ee90b2df2202562ab9f4a20f
|
refs/heads/master
| 2021-01-20T03:29:08.888312
| 2019-03-27T05:29:15
| 2019-03-27T05:29:15
| 89,544,769
| 9
| 7
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,822
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# @Time : 2017/6/21 20:01
# @Author : Spareribs
# @File : process_cuiqinghua_crawl.py
# @Notice : A site-wide crawler implemented with the breadth-first search (BFS) algorithm - multi-process version
"""
import os
import urllib2
from collections import deque
import json
from lxml import etree
import httplib
import hashlib
from pybloom import BloomFilter
import thread
import threading
import time
from process_mysqlmgr import CrawlDatabaseManager
from mysql.connector import errorcode
import mysql.connector
request_headers = {
'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36",
}
def getPageContent(now_url, index, now_depth):
print "【Download】正在下载网址 {0} 当前深度为 {1}".format(now_url, now_depth)
max_depth = 2 # 爬取深度
try:
        # request now_url with urllib2 and read the page content via read()
req = urllib2.Request(now_url, headers=request_headers)
response = urllib2.urlopen(req)
html_page = response.read()
        filename = now_url[7:].replace('/', '_')  # strip the leading "http://" and replace / with _
        # write the downloaded page to a file
fo = open("{0}{1}.html".format(dir_name, filename), 'wb+')
fo.write(html_page)
fo.close()
        mysqlmgr.finishUrl(index)  # [dbmanager.finishUrl] mark this url's status as done via a row-level lock once the download finishes
    # handle the various error cases
except urllib2.HTTPError, Arguments:
print "【Error】:HTTPError {0}\n".format(Arguments)
return
except httplib.BadStatusLine, Arguments:
print "【Error】:BadStatusLine {0}\n".format(Arguments)
return
except IOError, Arguments:
print "【Error】:IOError {0}\n".format(Arguments)
return
except Exception, Arguments:
print "【Error】:Exception {0}\n".format(Arguments)
return
    # parse the page and collect all URLs it contains
try:
html = etree.HTML(html_page.lower().decode('utf-8'))
hrefs = html.xpath(u"//a")
for href in hrefs:
            # filter the hrefs extracted by xpath and keep only the useful ones
try:
if 'href' in href.attrib:
val = href.attrib['href']
                    if val.find('javascript:') != -1:  # skip links like "javascript:void(0)"
continue
                    if val.startswith('http://') is False:  # join relative paths such as "/mdd/calendar/" onto the site root
                        if val.startswith('/'):
                            val = 'http://cuiqingcai.com{0}'.format(val)
else:
continue
                    if val[-1] == '/':  # drop a trailing /
val = val[0:-1]
                    if now_depth + 1 == max_depth:  # stop once the configured maximum depth is reached; do not enqueue deeper links
break
else:
                        mysqlmgr.enqueueUrl(val, now_depth + 1)  # [dbmanager.enqueueUrl] add the url to the database
except ValueError:
continue
    except UnicodeDecodeError:  # ignore pages whose content cannot be decoded as utf-8
pass
# create a database manager object (it behaves like a queue) and set the maximum number of worker threads
max_num_thread = 10
mysqlmgr = CrawlDatabaseManager(max_num_thread)
# output directory for the downloaded HTML pages
dir_name = 'test_cuiqingcai/'
if not os.path.exists(dir_name):  # create the storage directory if it does not exist
os.makedirs(dir_name)
# put first page into queue
mysqlmgr.enqueueUrl("http://cuiqingcai.com/", 0) # 将首页面存入数据库
start_time = time.time() # 记录开始时间爱你
is_root_page = True # 标记首页
threads = [] # 创建进程池
CRAWL_DELAY = 0 # 设置超时,控制下载的速率,避免太过频繁访问目标网站,但目标网站没有这个限制
while True:
    curtask = mysqlmgr.dequeueUrl()  # [dbmanager.dequeueUrl] fetch one url with status new from the database
if curtask is None:
        for t in threads:  # join: wait for all worker threads to finish before leaving the main loop
t.join()
break
# looking for an empty thread from pool to crawl
    if is_root_page is True:  # crawl the home page first so the database holds initial data, then clear the root flag
getPageContent(curtask['url'], curtask['index'], curtask['depth'])
is_root_page = False
else:
while True:
            for t in threads:  # drop threads that have terminated (no longer alive), e.g. because of an exception
if not t.is_alive():
threads.remove(t)
            if len(threads) >= max_num_thread:  # if the pool is full, keep looping until the count drops below the limit
                # time.sleep(CRAWL_DELAY)
                continue  # loops back to the len(threads) >= max_num_thread check
            try:  # create a thread, add it to the pool and start it
t = threading.Thread(target=getPageContent, name=None,
args=(curtask['url'], curtask['index'], curtask['depth']))
                threads.append(t)  # add the thread to the pool
                t.setDaemon(True)  # daemon thread, so Ctrl-C can stop the crawl without waiting for workers to finish
                t.start()  # start the thread
# time.sleep(CRAWL_DELAY)
break
except Exception:
print "【Error】: 不能启动thread"
print '【End】 花费时间 {0:.2f} 秒'.format(time.time() - start_time)
|
[
"370835062@qq.com"
] |
370835062@qq.com
|
0d1f402739ad994b6d9b7d80a0dff023821abdc9
|
35245b4d66310c90da4f5d2cf5f2792714b281ac
|
/Django/waterwatchapp/management/commands/test_db_connection.py
|
f31e9fa386f31d65994a443aaec9e8f8818604e9
|
[] |
no_license
|
JaschaMuller/web_app_base
|
7e2538335e4302245fa3d1166f4667b33aa71ed1
|
7b33a5be36febe2b65ae1e02aa4b50717959472f
|
refs/heads/master
| 2023-08-31T00:25:55.642478
| 2021-10-09T20:08:14
| 2021-10-09T20:08:14
| 318,662,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 434
|
py
|
from django.core.management.base import BaseCommand
from django.db import connections
from django.db.utils import OperationalError
class Command(BaseCommand):
def handle(self, *args, **options):
db_conn = connections['default']
try:
c = db_conn.cursor()
except OperationalError:
connected = False
print('-->DB connection Failed<--')
else:
connected = True
            print('-->DB connection Succeeded<--')
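# Usage note (added for illustration): because this file lives under
# management/commands/, Django exposes it as `python manage.py test_db_connection`,
# which prints whether a cursor could be obtained from the default database.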
|
[
"jascha.muller@gmail.com"
] |
jascha.muller@gmail.com
|
f013402f3952e50353dfcc3d35f383e3cc53f977
|
7ed67ecfa65ee6c770689e6bf45b75ed817713c8
|
/MatrixMultiplication/matrixmultiplication.py
|
ff83425b1324a3fc63704a632dfaf281cbc99045
|
[] |
no_license
|
rendaixu/CompositionMR
|
fa0402be85a1d8bee40ef23bc412aa198b98f2c9
|
2d655729a3ad6d68b19ff55c403b3ddffcb859e5
|
refs/heads/master
| 2021-09-13T18:41:41.514542
| 2018-04-28T06:12:34
| 2018-04-28T06:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,553
|
py
|
import sys
def trim(c, jc):
index = 0
for i in c:
if i == 0:
break
else:
index = index + 1
c = c[0: index]
jc = jc[0: index]
return c, jc
def Normalization(n,m,c,ic,jc):
result_c = []
nz = 0
for i in range(n):
temp = []
for j in range(m):
temp.append(0)
result_c.append(temp)
for i in range(n):
for j in range(ic[i], ic[i+1]):
result_c[i][jc[nz]] = c[nz]
nz = nz+1
return result_c
def SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij*b[k]
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_1_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
#nz = nz
else:
c[icol]=c[icol]+aij*b[k]
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_2_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij*b[k]
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_3_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij*b[k]
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_4_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_5_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+b[k]
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_6_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij+b[k] # this line is modified from " aij*b[k]"
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_7_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[i] # this line is modified from a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij *b[k]
for k in range(ic[i], nz):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MU_8_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij*b[k]
for k in range(ic[i], nz):
mask[jc[i]] = -1 # this line is modified from jc[k]
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def CreateSparseMat(A):
a = []
ia = [0]
ja = []
off_set = 0
for i in range(len(A)):
for j in range(len(A[0])):
if not A[i][j] == 0:
a.append(A[i][j])
off_set = off_set+1
ja.append(j)
ia.append(off_set)
return (a, ia, ja)
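# Worked example (added for illustration): for the matrix used in __main__,
#   A = [[1, 7, 0, 0],
#        [0, 2, 8, 0],
#        [5, 0, 3, 9],
#        [0, 6, 0, 4]]
# CreateSparseMat(A) returns the CSR-style triple
#   a  = [1, 7, 2, 8, 5, 3, 9, 6, 4]   (non-zero values, row by row)
#   ia = [0, 2, 4, 7, 9]               (offsets into a where each row starts)
#   ja = [0, 1, 1, 2, 0, 2, 3, 1, 3]   (column index of each value)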
def MU_9_SparseMatMul(n, m, a, ia, ja, b, ib, jb):
nz = 0
mask = []
for i in range(m):
mask.append(-1)
c = []
ic = [0]
jc = []
for i in range(n):
ic.append(0)
for i in range(n*m):
c.append(0)
jc.append(0)
for i in range(0,n):
for j in range(ia[i], ia[i+1]):
neighbour = ja[j]
aij = a[j]
for k in range(ib[neighbour], ib[neighbour+1]):
icol_add = jb[k]
icol = mask[icol_add]
if (icol == -1):
jc[nz] = icol_add
c[nz] = aij * b[k]
mask[icol_add] = nz
nz = nz + 1
else:
c[icol]=c[icol]+aij*b[k]
for k in range(ic[i], n-1):
mask[jc[k]] = -1
ic[i+1] = nz
c, jc = trim(c, jc)
C = Normalization(n, m, c, ic, jc)
return C
def MatMul(A, B):
if not (len(A[0]) == len(B)):
print("Matrix cannot product!\n Matrix production failure, since they do not match.")
sys.exit(-1)
product_row = len(A)
product_col = len(B[0])
(a,ia,ja)= CreateSparseMat(A)
(b,ib,jb)=CreateSparseMat(B)
C = SparseMatMul(product_row,product_col,a,ia,ja, b,ib,jb)
return C
class MatrixMultiple():
def __init__(self,mutant_index):
self.mutant_index = mutant_index
def MatMul(self, A, B):
if not (len(A[0]) == len(B)):
print("Matrix cannot product!\n Matrix production failure, since they do not match.")
sys.exit(-1)
product_row = len(A)
product_col = len(B[0])
(a, ia, ja) = CreateSparseMat(A)
(b, ib, jb) = CreateSparseMat(B)
current_module = sys.modules[__name__]
C = getattr(current_module, self.mutant_index)(product_row, product_col, a, ia, ja, b, ib, jb)
return C
if __name__ =="__main__":
A = [[1, 7, 0, 0],[0, 2, 8, 0],[5, 0, 3, 9],[0, 6, 0, 4]]
#(a, ia, ja) = CreateSparseMat(A)
#print(a, ia, ja)
#c, ic, jc = SparseMatMul(4, 4, a,ia,ja, a, ia, ja)
#normal_c = Normalization(4,4, c,ic,jc)
#print(normal_c)
print(MatMul(A, A))
|
[
"qiukun11@sina.com"
] |
qiukun11@sina.com
|
925910732b6f517b22a530362178bdbe84f9ceeb
|
072b36de31ecd906a14de7cf8434546302f51657
|
/intent_detection/encoder_clients.py
|
05b6ae3305b9e14d89716fc2edf94124b3adae4f
|
[
"Apache-2.0"
] |
permissive
|
solversa/polyai-models
|
a9b991e1bc567097a45ea48fc13113449211dbba
|
5ea0f961270e8f04c24bd3f3f1cf30cddb8268f2
|
refs/heads/master
| 2022-11-06T11:16:28.866610
| 2020-06-24T16:53:19
| 2020-06-24T16:53:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 13,493
|
py
|
"""Sentence encoder library for tensorflow_hub based sentence encoders
The included sentence encoders are:
- BERT: https://arxiv.org/abs/1810.04805
- USE multilingual: https://arxiv.org/abs/1907.04307
- ConveRT: https://arxiv.org/abs/1911.03688
Copyright PolyAI Limited.
"""
import abc
import os
import pickle
import glog
import numpy as np
import tensorflow as tf
import tensorflow_hub as tf_hub
import tensorflow_text # NOQA: it is used when importing ConveRT.
import tf_sentencepiece # NOQA: it is used when importing USE.
from bert.tokenization import FullTokenizer
from tqdm import tqdm
from encoder_client import EncoderClient
_CONVERT_PATH = "http://models.poly-ai.com/convert/v1/model.tar.gz"
_USE_PATH = ("https://tfhub.dev/google/universal-sentence-encoder-"
"multilingual-large/1")
_BERT_PATH = "https://tfhub.dev/google/bert_uncased_L-24_H-1024_A-16/1"
def l2_normalize(encodings):
"""L2 normalizes the given matrix of encodings."""
norms = np.linalg.norm(encodings, ord=2, axis=-1, keepdims=True)
return encodings / norms
class ClassificationEncoderClient(object):
"""A model that maps from text to dense vectors."""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def encode_sentences(self, sentences):
"""Encodes a list of sentences
Args:
sentences: a list of strings
Returns:
an (N, d) numpy matrix of sentence encodings.
"""
return NotImplementedError
def get_encoder_client(encoder_type, cache_dir=None):
"""get an EncoderClient object
Args:
encoder_type: (str) one of "use", "convert", "combined" or "bert"
cache_dir: The directory where an encoding dictionary will be cached
Returns:
a ClassificationEncoderClient
"""
if encoder_type.lower() == "use":
encoder_client = UseEncoderClient(_USE_PATH)
if cache_dir:
encoder_id = _USE_PATH.replace("/", "-")
encoder_client = CachingEncoderClient(
encoder_client, encoder_id, cache_dir)
elif encoder_type.lower() == "convert":
encoder_client = ConvertEncoderClient(_CONVERT_PATH)
if cache_dir:
encoder_id = _CONVERT_PATH.replace("/", "-")
encoder_client = CachingEncoderClient(
encoder_client, encoder_id, cache_dir)
elif encoder_type.lower() == "combined":
use_encoder = UseEncoderClient(_USE_PATH)
convert_encoder = ConvertEncoderClient(_CONVERT_PATH)
if cache_dir:
use_id = _USE_PATH.replace("/", "-")
use_encoder = CachingEncoderClient(
use_encoder, use_id, cache_dir)
convert_id = _CONVERT_PATH.replace("/", "-")
convert_encoder = CachingEncoderClient(
convert_encoder, convert_id, cache_dir)
encoder_client = CombinedEncoderClient([convert_encoder, use_encoder])
elif encoder_type.lower() == "bert":
encoder_client = BertEncoderClient(_BERT_PATH)
if cache_dir:
encoder_id = _BERT_PATH.replace("/", "-")
encoder_client = CachingEncoderClient(
encoder_client, encoder_id, cache_dir)
else:
raise ValueError(f"{encoder_type} is not a valid encoder type")
return encoder_client
class CachingEncoderClient(ClassificationEncoderClient):
"""Wrapper around an encoder to cache the encodings on disk"""
def __init__(self, encoder_client, encoder_id, cache_dir):
"""Create a new CachingEncoderClient object
Args:
encoder_client: An EncoderClient
encoder_id: An unique ID for the encoder
cache_dir: The directory where the encodings will be cached
"""
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
self._encodings_dict_path = os.path.join(
cache_dir, encoder_id)
self._encoder_client = encoder_client
self._encodings_dict = self._load_or_create_encodings_dict()
def _load_or_create_encodings_dict(self):
if os.path.exists(self._encodings_dict_path):
with open(self._encodings_dict_path, "rb") as f:
encodings_dict = pickle.load(f)
else:
encodings_dict = {}
return encodings_dict
def _save_encodings_dict(self):
with open(self._encodings_dict_path, "wb") as f:
pickle.dump(self._encodings_dict, f)
def encode_sentences(self, sentences):
"""Encode a list of sentences
Args:
sentences: the list of sentences
Returns:
an (N, d) numpy matrix of sentence encodings.
"""
missing_sentences = [
sentence for sentence in sentences
if sentence not in self._encodings_dict]
if len(sentences) != len(missing_sentences):
glog.info(f"{len(sentences) - len(missing_sentences)} cached "
f"sentences will not be encoded")
if missing_sentences:
missing_encodings = self._encoder_client.encode_sentences(
missing_sentences)
for sentence, encoding in zip(missing_sentences,
missing_encodings):
self._encodings_dict[sentence] = encoding
self._save_encodings_dict()
encodings = np.array(
[self._encodings_dict[sentence] for sentence in sentences])
return encodings
class ConvertEncoderClient(ClassificationEncoderClient):
"""A wrapper around ClassificationEncoderClient to normalise the output"""
def __init__(self, uri, batch_size=100):
"""Create a new ConvertEncoderClient object
Args:
uri: The uri to the tensorflow_hub module
batch_size: maximum number of sentences to encode at once
"""
self._batch_size = batch_size
self._encoder_client = EncoderClient(uri)
def encode_sentences(self, sentences):
"""Encode a list of sentences
Args:
sentences: the list of sentences
Returns:
an (N, d) numpy matrix of sentence encodings.
"""
encodings = []
glog.setLevel("ERROR")
for i in tqdm(range(0, len(sentences), self._batch_size),
"encoding sentence batches"):
encodings.append(
self._encoder_client.encode_sentences(
sentences[i:i + self._batch_size]))
glog.setLevel("INFO")
return l2_normalize(np.vstack(encodings))
class UseEncoderClient(ClassificationEncoderClient):
"""A Universal Sentence Encoder model loaded as a tensorflow hub module"""
def __init__(self, uri, batch_size=100):
"""Create a new UseEncoderClient object
Args:
uri: The uri to the tensorflow_hub USE module
batch_size: maximum number of sentences to encode at once
"""
self._batch_size = batch_size
self._session = tf.Session(graph=tf.Graph())
with self._session.graph.as_default():
glog.info("Loading %s model from tensorflow hub", uri)
embed_fn = tf_hub.Module(uri)
self._fed_texts = tf.placeholder(shape=[None], dtype=tf.string)
self._embeddings = embed_fn(self._fed_texts)
encoding_info = embed_fn.get_output_info_dict().get('default')
if encoding_info:
self._encoding_dim = encoding_info.get_shape()[-1].value
init_ops = (
tf.global_variables_initializer(), tf.tables_initializer())
glog.info("Initializing graph.")
self._session.run(init_ops)
def encode_sentences(self, sentences):
"""Encode a list of sentences
Args:
sentences: the list of sentences
Returns:
an (N, d) numpy matrix of sentence encodings.
"""
encodings = []
for i in tqdm(range(0, len(sentences), self._batch_size),
"encoding sentence batches"):
encodings.append(
self._session.run(
self._embeddings,
{self._fed_texts: sentences[i:i + self._batch_size]}))
return np.vstack(encodings)
class BertEncoderClient(ClassificationEncoderClient):
"""The BERT encoder that is loaded as a module from tensorflow hub.
This class tokenizes the input text using the bert tokenization
library. The final encoding is computed as the sum of the token
embeddings.
Args:
uri: (string) the tensorflow hub URI for the model.
batch_size: maximum number of sentences to encode at once
"""
def __init__(self, uri, batch_size=100):
"""Create a new `BERTEncoder` object."""
if not tf.test.is_gpu_available():
glog.warning(
"No GPU detected, BERT will run a lot slower than with a GPU.")
self._batch_size = batch_size
self._session = tf.Session(graph=tf.Graph())
with self._session.graph.as_default():
glog.info("Loading %s model from tensorflow hub", uri)
embed_fn = tf_hub.Module(uri, trainable=False)
self._tokenizer = self._create_tokenizer_from_hub_module(uri)
self._input_ids = tf.placeholder(
name="input_ids", shape=[None, None], dtype=tf.int32)
self._input_mask = tf.placeholder(
name="input_mask", shape=[None, None], dtype=tf.int32)
self._segment_ids = tf.zeros_like(self._input_ids)
bert_inputs = dict(
input_ids=self._input_ids,
input_mask=self._input_mask,
segment_ids=self._segment_ids
)
embeddings = embed_fn(
inputs=bert_inputs, signature="tokens", as_dict=True)[
"sequence_output"
]
mask = tf.expand_dims(
tf.cast(self._input_mask, dtype=tf.float32), -1)
self._embeddings = tf.reduce_sum(mask * embeddings, axis=1)
init_ops = (
tf.global_variables_initializer(), tf.tables_initializer())
glog.info("Initializing graph.")
self._session.run(init_ops)
def encode_sentences(self, sentences):
"""Encode a list of sentences
Args:
sentences: the list of sentences
Returns:
an array with shape (len(sentences), ENCODING_SIZE)
"""
encodings = []
for i in tqdm(range(0, len(sentences), self._batch_size),
"encoding sentence batches"):
encodings.append(
self._session.run(
self._embeddings,
self._feed_dict(sentences[i:i + self._batch_size])))
return l2_normalize(np.vstack(encodings))
@staticmethod
def _create_tokenizer_from_hub_module(uri):
"""Get the vocab file and casing info from the Hub module."""
with tf.Graph().as_default():
bert_module = tf_hub.Module(uri, trainable=False)
tokenization_info = bert_module(
signature="tokenization_info", as_dict=True)
with tf.Session() as sess:
vocab_file, do_lower_case = sess.run(
[
tokenization_info["vocab_file"],
tokenization_info["do_lower_case"]
])
return FullTokenizer(
vocab_file=vocab_file, do_lower_case=do_lower_case)
def _feed_dict(self, texts, max_seq_len=128):
"""Create a feed dict for feeding the texts as input.
This uses dynamic padding so that the maximum sequence length is the
smaller of `max_seq_len` and the longest sequence actually found in the
batch. (The code in `bert.run_classifier` always pads up to the maximum
even if the examples in the batch are all shorter.)
"""
all_ids = []
for text in texts:
tokens = ["[CLS]"] + self._tokenizer.tokenize(text)
# Possibly truncate the tokens.
tokens = tokens[:(max_seq_len - 1)]
tokens.append("[SEP]")
ids = self._tokenizer.convert_tokens_to_ids(tokens)
all_ids.append(ids)
max_seq_len = max(map(len, all_ids))
input_ids = []
input_mask = []
for ids in all_ids:
mask = [1] * len(ids)
# Zero-pad up to the sequence length.
while len(ids) < max_seq_len:
ids.append(0)
mask.append(0)
input_ids.append(ids)
input_mask.append(mask)
return {self._input_ids: input_ids, self._input_mask: input_mask}
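    # Illustration of the dynamic padding above: for two texts that tokenize to 3 and
    # 5 word-pieces, the sequences become 5 and 7 ids long ([CLS] + pieces + [SEP]),
    # so both are padded only to length 7 rather than to max_seq_len (128).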
class CombinedEncoderClient(ClassificationEncoderClient):
"""concatenates the encodings of several ClassificationEncoderClients
Args:
encoders: A list of ClassificationEncoderClients
"""
def __init__(self, encoders: list):
"""constructor"""
self._encoders = encoders
def encode_sentences(self, sentences):
"""Encode a list of sentences
Args:
sentences: the list of sentences
Returns:
an array with shape (len(sentences), ENCODING_SIZE)
"""
encodings = np.hstack([encoder.encode_sentences(sentences)
for encoder in self._encoders])
return encodings
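if __name__ == "__main__":
    # Minimal usage sketch added for illustration. Running it downloads the ConveRT
    # module from models.poly-ai.com on first use, so it is not a lightweight test;
    # the cache directory below is an arbitrary example.
    client = get_encoder_client("convert", cache_dir="/tmp/intent_encodings")
    vectors = client.encode_sentences(["hello there", "how can I help?"])
    print(vectors.shape)  # (2, encoding_dim); rows are L2-normalised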
|
[
"noreply@github.com"
] |
solversa.noreply@github.com
|
84e27abcbf77a8326005c61b2a757da7bacca739
|
5e6d8b9989247801718dd1f10009f0f7f54c1eb4
|
/sdk/python/pulumi_azure_native/kusto/v20210101/attached_database_configuration.py
|
946ae232ce58b45d17af011ef974b0437c9ea1a7
|
[
"BSD-3-Clause",
"Apache-2.0"
] |
permissive
|
vivimouret29/pulumi-azure-native
|
d238a8f91688c9bf09d745a7280b9bf2dd6d44e0
|
1cbd988bcb2aa75a83e220cb5abeb805d6484fce
|
refs/heads/master
| 2023-08-26T05:50:40.560691
| 2021-10-21T09:25:07
| 2021-10-21T09:25:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 18,081
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['AttachedDatabaseConfigurationArgs', 'AttachedDatabaseConfiguration']
@pulumi.input_type
class AttachedDatabaseConfigurationArgs:
def __init__(__self__, *,
cluster_name: pulumi.Input[str],
cluster_resource_id: pulumi.Input[str],
database_name: pulumi.Input[str],
default_principals_modification_kind: pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']],
resource_group_name: pulumi.Input[str],
attached_database_configuration_name: Optional[pulumi.Input[str]] = None,
location: Optional[pulumi.Input[str]] = None,
table_level_sharing_properties: Optional[pulumi.Input['TableLevelSharingPropertiesArgs']] = None):
"""
The set of arguments for constructing a AttachedDatabaseConfiguration resource.
:param pulumi.Input[str] cluster_name: The name of the Kusto cluster.
:param pulumi.Input[str] cluster_resource_id: The resource id of the cluster where the databases you would like to attach reside.
:param pulumi.Input[str] database_name: The name of the database which you would like to attach, use * if you want to follow all current and future databases.
:param pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']] default_principals_modification_kind: The default principals modification kind
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster.
:param pulumi.Input[str] attached_database_configuration_name: The name of the attached database configuration.
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input['TableLevelSharingPropertiesArgs'] table_level_sharing_properties: Table level sharing specifications
"""
pulumi.set(__self__, "cluster_name", cluster_name)
pulumi.set(__self__, "cluster_resource_id", cluster_resource_id)
pulumi.set(__self__, "database_name", database_name)
pulumi.set(__self__, "default_principals_modification_kind", default_principals_modification_kind)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if attached_database_configuration_name is not None:
pulumi.set(__self__, "attached_database_configuration_name", attached_database_configuration_name)
if location is not None:
pulumi.set(__self__, "location", location)
if table_level_sharing_properties is not None:
pulumi.set(__self__, "table_level_sharing_properties", table_level_sharing_properties)
@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Input[str]:
"""
The name of the Kusto cluster.
"""
return pulumi.get(self, "cluster_name")
@cluster_name.setter
def cluster_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_name", value)
@property
@pulumi.getter(name="clusterResourceId")
def cluster_resource_id(self) -> pulumi.Input[str]:
"""
The resource id of the cluster where the databases you would like to attach reside.
"""
return pulumi.get(self, "cluster_resource_id")
@cluster_resource_id.setter
def cluster_resource_id(self, value: pulumi.Input[str]):
pulumi.set(self, "cluster_resource_id", value)
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Input[str]:
"""
        The name of the database which you would like to attach; use * if you want to follow all current and future databases.
"""
return pulumi.get(self, "database_name")
@database_name.setter
def database_name(self, value: pulumi.Input[str]):
pulumi.set(self, "database_name", value)
@property
@pulumi.getter(name="defaultPrincipalsModificationKind")
def default_principals_modification_kind(self) -> pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']]:
"""
The default principals modification kind
"""
return pulumi.get(self, "default_principals_modification_kind")
@default_principals_modification_kind.setter
def default_principals_modification_kind(self, value: pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']]):
pulumi.set(self, "default_principals_modification_kind", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group containing the Kusto cluster.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="attachedDatabaseConfigurationName")
def attached_database_configuration_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the attached database configuration.
"""
return pulumi.get(self, "attached_database_configuration_name")
@attached_database_configuration_name.setter
def attached_database_configuration_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "attached_database_configuration_name", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter(name="tableLevelSharingProperties")
def table_level_sharing_properties(self) -> Optional[pulumi.Input['TableLevelSharingPropertiesArgs']]:
"""
Table level sharing specifications
"""
return pulumi.get(self, "table_level_sharing_properties")
@table_level_sharing_properties.setter
def table_level_sharing_properties(self, value: Optional[pulumi.Input['TableLevelSharingPropertiesArgs']]):
pulumi.set(self, "table_level_sharing_properties", value)
class AttachedDatabaseConfiguration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attached_database_configuration_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
cluster_resource_id: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
default_principals_modification_kind: Optional[pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_level_sharing_properties: Optional[pulumi.Input[pulumi.InputType['TableLevelSharingPropertiesArgs']]] = None,
__props__=None):
"""
Class representing an attached database configuration.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] attached_database_configuration_name: The name of the attached database configuration.
:param pulumi.Input[str] cluster_name: The name of the Kusto cluster.
:param pulumi.Input[str] cluster_resource_id: The resource id of the cluster where the databases you would like to attach reside.
        :param pulumi.Input[str] database_name: The name of the database which you would like to attach; use * if you want to follow all current and future databases.
:param pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']] default_principals_modification_kind: The default principals modification kind
:param pulumi.Input[str] location: Resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster.
:param pulumi.Input[pulumi.InputType['TableLevelSharingPropertiesArgs']] table_level_sharing_properties: Table level sharing specifications
"""
...
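    # A minimal usage sketch of this resource type (not part of the generated
    # SDK; the resource group, cluster names and resource id below are
    # illustrative assumptions, not values from this module):
    #
    #   follower_config = AttachedDatabaseConfiguration(
    #       "followerConfig",
    #       resource_group_name="example-rg",
    #       cluster_name="example-follower-cluster",
    #       cluster_resource_id="/subscriptions/<subscription>/resourceGroups/example-rg/providers/Microsoft.Kusto/clusters/example-leader-cluster",
    #       database_name="*",
    #       default_principals_modification_kind="Union")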
@overload
def __init__(__self__,
resource_name: str,
args: AttachedDatabaseConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Class representing an attached database configuration.
:param str resource_name: The name of the resource.
:param AttachedDatabaseConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(AttachedDatabaseConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
attached_database_configuration_name: Optional[pulumi.Input[str]] = None,
cluster_name: Optional[pulumi.Input[str]] = None,
cluster_resource_id: Optional[pulumi.Input[str]] = None,
database_name: Optional[pulumi.Input[str]] = None,
default_principals_modification_kind: Optional[pulumi.Input[Union[str, 'DefaultPrincipalsModificationKind']]] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
table_level_sharing_properties: Optional[pulumi.Input[pulumi.InputType['TableLevelSharingPropertiesArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = AttachedDatabaseConfigurationArgs.__new__(AttachedDatabaseConfigurationArgs)
__props__.__dict__["attached_database_configuration_name"] = attached_database_configuration_name
if cluster_name is None and not opts.urn:
raise TypeError("Missing required property 'cluster_name'")
__props__.__dict__["cluster_name"] = cluster_name
if cluster_resource_id is None and not opts.urn:
raise TypeError("Missing required property 'cluster_resource_id'")
__props__.__dict__["cluster_resource_id"] = cluster_resource_id
if database_name is None and not opts.urn:
raise TypeError("Missing required property 'database_name'")
__props__.__dict__["database_name"] = database_name
if default_principals_modification_kind is None and not opts.urn:
raise TypeError("Missing required property 'default_principals_modification_kind'")
__props__.__dict__["default_principals_modification_kind"] = default_principals_modification_kind
__props__.__dict__["location"] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["table_level_sharing_properties"] = table_level_sharing_properties
__props__.__dict__["attached_database_names"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:kusto/v20210101:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto/v20190907:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto/v20190907:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto/v20191109:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto/v20191109:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto/v20200215:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto/v20200215:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto/v20200614:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto/v20200614:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto/v20200918:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto/v20200918:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-native:kusto/v20210827:AttachedDatabaseConfiguration"), pulumi.Alias(type_="azure-nextgen:kusto/v20210827:AttachedDatabaseConfiguration")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(AttachedDatabaseConfiguration, __self__).__init__(
'azure-native:kusto/v20210101:AttachedDatabaseConfiguration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'AttachedDatabaseConfiguration':
"""
Get an existing AttachedDatabaseConfiguration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = AttachedDatabaseConfigurationArgs.__new__(AttachedDatabaseConfigurationArgs)
__props__.__dict__["attached_database_names"] = None
__props__.__dict__["cluster_resource_id"] = None
__props__.__dict__["database_name"] = None
__props__.__dict__["default_principals_modification_kind"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["table_level_sharing_properties"] = None
__props__.__dict__["type"] = None
return AttachedDatabaseConfiguration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="attachedDatabaseNames")
def attached_database_names(self) -> pulumi.Output[Sequence[str]]:
"""
The list of databases from the clusterResourceId which are currently attached to the cluster.
"""
return pulumi.get(self, "attached_database_names")
@property
@pulumi.getter(name="clusterResourceId")
def cluster_resource_id(self) -> pulumi.Output[str]:
"""
The resource id of the cluster where the databases you would like to attach reside.
"""
return pulumi.get(self, "cluster_resource_id")
@property
@pulumi.getter(name="databaseName")
def database_name(self) -> pulumi.Output[str]:
"""
        The name of the database which you would like to attach; use * if you want to follow all current and future databases.
"""
return pulumi.get(self, "database_name")
@property
@pulumi.getter(name="defaultPrincipalsModificationKind")
def default_principals_modification_kind(self) -> pulumi.Output[str]:
"""
The default principals modification kind
"""
return pulumi.get(self, "default_principals_modification_kind")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The provisioned state of the resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="tableLevelSharingProperties")
def table_level_sharing_properties(self) -> pulumi.Output[Optional['outputs.TableLevelSharingPropertiesResponse']]:
"""
Table level sharing specifications
"""
return pulumi.get(self, "table_level_sharing_properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
|
[
"noreply@github.com"
] |
vivimouret29.noreply@github.com
|
e621edac8b6236ada83c404b082d7857b31371de
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/carbon/common/lib/jinja2/lexer.py
|
e5a21bb93528b239c8fe57cd40d85f1a50ddb173
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 17,748
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\carbon\common\lib\jinja2\lexer.py
import re
from operator import itemgetter
from collections import deque
from jinja2.exceptions import TemplateSyntaxError
from jinja2.utils import LRUCache, next
_lexer_cache = LRUCache(50)
whitespace_re = re.compile('\\s+', re.U)
string_re = re.compile('(\'([^\'\\\\]*(?:\\\\.[^\'\\\\]*)*)\'|"([^"\\\\]*(?:\\\\.[^"\\\\]*)*)")', re.S)
integer_re = re.compile('\\d+')
try:
compile('f\xc3\xb6\xc3\xb6', '<unknown>', 'eval')
except SyntaxError:
name_re = re.compile('\\b[a-zA-Z_][a-zA-Z0-9_]*\\b')
else:
from jinja2 import _stringdefs
name_re = re.compile('[%s][%s]*' % (_stringdefs.xid_start, _stringdefs.xid_continue))
float_re = re.compile('(?<!\\.)\\d+\\.\\d+')
newline_re = re.compile('(\\r\\n|\\r|\\n)')
TOKEN_ADD = intern('add')
TOKEN_ASSIGN = intern('assign')
TOKEN_COLON = intern('colon')
TOKEN_COMMA = intern('comma')
TOKEN_DIV = intern('div')
TOKEN_DOT = intern('dot')
TOKEN_EQ = intern('eq')
TOKEN_FLOORDIV = intern('floordiv')
TOKEN_GT = intern('gt')
TOKEN_GTEQ = intern('gteq')
TOKEN_LBRACE = intern('lbrace')
TOKEN_LBRACKET = intern('lbracket')
TOKEN_LPAREN = intern('lparen')
TOKEN_LT = intern('lt')
TOKEN_LTEQ = intern('lteq')
TOKEN_MOD = intern('mod')
TOKEN_MUL = intern('mul')
TOKEN_NE = intern('ne')
TOKEN_PIPE = intern('pipe')
TOKEN_POW = intern('pow')
TOKEN_RBRACE = intern('rbrace')
TOKEN_RBRACKET = intern('rbracket')
TOKEN_RPAREN = intern('rparen')
TOKEN_SEMICOLON = intern('semicolon')
TOKEN_SUB = intern('sub')
TOKEN_TILDE = intern('tilde')
TOKEN_WHITESPACE = intern('whitespace')
TOKEN_FLOAT = intern('float')
TOKEN_INTEGER = intern('integer')
TOKEN_NAME = intern('name')
TOKEN_STRING = intern('string')
TOKEN_OPERATOR = intern('operator')
TOKEN_BLOCK_BEGIN = intern('block_begin')
TOKEN_BLOCK_END = intern('block_end')
TOKEN_VARIABLE_BEGIN = intern('variable_begin')
TOKEN_VARIABLE_END = intern('variable_end')
TOKEN_RAW_BEGIN = intern('raw_begin')
TOKEN_RAW_END = intern('raw_end')
TOKEN_COMMENT_BEGIN = intern('comment_begin')
TOKEN_COMMENT_END = intern('comment_end')
TOKEN_COMMENT = intern('comment')
TOKEN_LINESTATEMENT_BEGIN = intern('linestatement_begin')
TOKEN_LINESTATEMENT_END = intern('linestatement_end')
TOKEN_LINECOMMENT_BEGIN = intern('linecomment_begin')
TOKEN_LINECOMMENT_END = intern('linecomment_end')
TOKEN_LINECOMMENT = intern('linecomment')
TOKEN_DATA = intern('data')
TOKEN_INITIAL = intern('initial')
TOKEN_EOF = intern('eof')
operators = {'+': TOKEN_ADD,
'-': TOKEN_SUB,
'/': TOKEN_DIV,
'//': TOKEN_FLOORDIV,
'*': TOKEN_MUL,
'%': TOKEN_MOD,
'**': TOKEN_POW,
'~': TOKEN_TILDE,
'[': TOKEN_LBRACKET,
']': TOKEN_RBRACKET,
'(': TOKEN_LPAREN,
')': TOKEN_RPAREN,
'{': TOKEN_LBRACE,
'}': TOKEN_RBRACE,
'==': TOKEN_EQ,
'!=': TOKEN_NE,
'>': TOKEN_GT,
'>=': TOKEN_GTEQ,
'<': TOKEN_LT,
'<=': TOKEN_LTEQ,
'=': TOKEN_ASSIGN,
'.': TOKEN_DOT,
':': TOKEN_COLON,
'|': TOKEN_PIPE,
',': TOKEN_COMMA,
';': TOKEN_SEMICOLON}
reverse_operators = dict([ (v, k) for k, v in operators.iteritems() ])
operator_re = re.compile('(%s)' % '|'.join((re.escape(x) for x in sorted(operators, key=lambda x: -len(x)))))
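# A small sketch of how these tables relate (illustrative, not executed here):
#
#   assert operators['=='] is TOKEN_EQ               # '==' lexes to the 'eq' token
#   assert reverse_operators[TOKEN_EQ] == '=='       # and maps back for error messages
#   assert operator_re.match('**').group() == '**'   # longest operators are tried first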
ignored_tokens = frozenset([TOKEN_COMMENT_BEGIN,
TOKEN_COMMENT,
TOKEN_COMMENT_END,
TOKEN_WHITESPACE,
TOKEN_WHITESPACE,
TOKEN_LINECOMMENT_BEGIN,
TOKEN_LINECOMMENT_END,
TOKEN_LINECOMMENT])
ignore_if_empty = frozenset([TOKEN_WHITESPACE,
TOKEN_DATA,
TOKEN_COMMENT,
TOKEN_LINECOMMENT])
def _describe_token_type(token_type):
if token_type in reverse_operators:
return reverse_operators[token_type]
return {TOKEN_COMMENT_BEGIN: 'begin of comment',
TOKEN_COMMENT_END: 'end of comment',
TOKEN_COMMENT: 'comment',
TOKEN_LINECOMMENT: 'comment',
TOKEN_BLOCK_BEGIN: 'begin of statement block',
TOKEN_BLOCK_END: 'end of statement block',
TOKEN_VARIABLE_BEGIN: 'begin of print statement',
TOKEN_VARIABLE_END: 'end of print statement',
TOKEN_LINESTATEMENT_BEGIN: 'begin of line statement',
TOKEN_LINESTATEMENT_END: 'end of line statement',
TOKEN_DATA: 'template data / text',
TOKEN_EOF: 'end of template'}.get(token_type, token_type)
def describe_token(token):
if token.type == 'name':
return token.value
return _describe_token_type(token.type)
def describe_token_expr(expr):
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type)
def count_newlines(value):
return len(newline_re.findall(value))
def compile_rules(environment):
e = re.escape
rules = [(len(environment.comment_start_string), 'comment', e(environment.comment_start_string)), (len(environment.block_start_string), 'block', e(environment.block_start_string)), (len(environment.variable_start_string), 'variable', e(environment.variable_start_string))]
if environment.line_statement_prefix is not None:
rules.append((len(environment.line_statement_prefix), 'linestatement', '^\\s*' + e(environment.line_statement_prefix)))
if environment.line_comment_prefix is not None:
rules.append((len(environment.line_comment_prefix), 'linecomment', '(?:^|(?<=\\S))[^\\S\\r\\n]*' + e(environment.line_comment_prefix)))
return [ x[1:] for x in sorted(rules, reverse=True) ]
class Failure(object):
def __init__(self, message, cls = TemplateSyntaxError):
self.message = message
self.error_class = cls
def __call__(self, lineno, filename):
raise self.error_class(self.message, lineno, filename)
class Token(tuple):
__slots__ = ()
lineno, type, value = (property(itemgetter(x)) for x in range(3))
def __new__(cls, lineno, type, value):
return tuple.__new__(cls, (lineno, intern(str(type)), value))
def __str__(self):
if self.type in reverse_operators:
return reverse_operators[self.type]
if self.type == 'name':
return self.value
return self.type
def test(self, expr):
if self.type == expr:
return True
if ':' in expr:
return expr.split(':', 1) == [self.type, self.value]
return False
def test_any(self, *iterable):
for expr in iterable:
if self.test(expr):
return True
return False
def __repr__(self):
return 'Token(%r, %r, %r)' % (self.lineno, self.type, self.value)
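# Sketch of the 'type' / 'type:value' matching convention used by Token.test
# (illustrative, not executed here):
#
#   tok = Token(1, 'name', 'if')
#   tok.test('name')                      # True: bare type match
#   tok.test('name:if')                   # True: type and value must both match
#   tok.test_any('integer', 'name:if')    # True if any expression matches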
class TokenStreamIterator(object):
def __init__(self, stream):
self.stream = stream
def __iter__(self):
return self
def next(self):
token = self.stream.current
if token.type is TOKEN_EOF:
self.stream.close()
raise StopIteration()
next(self.stream)
return token
class TokenStream(object):
def __init__(self, generator, name, filename):
self._next = iter(generator).next
self._pushed = deque()
self.name = name
self.filename = filename
self.closed = False
self.current = Token(1, TOKEN_INITIAL, '')
next(self)
def __iter__(self):
return TokenStreamIterator(self)
def __nonzero__(self):
return bool(self._pushed) or self.current.type is not TOKEN_EOF
eos = property(lambda x: not x, doc='Are we at the end of the stream?')
def push(self, token):
self._pushed.append(token)
def look(self):
old_token = next(self)
result = self.current
self.push(result)
self.current = old_token
return result
def skip(self, n = 1):
for x in xrange(n):
next(self)
def next_if(self, expr):
if self.current.test(expr):
return next(self)
def skip_if(self, expr):
return self.next_if(expr) is not None
def next(self):
rv = self.current
if self._pushed:
self.current = self._pushed.popleft()
elif self.current.type is not TOKEN_EOF:
try:
self.current = self._next()
except StopIteration:
self.close()
return rv
def close(self):
self.current = Token(self.current.lineno, TOKEN_EOF, '')
self._next = None
self.closed = True
def expect(self, expr):
if not self.current.test(expr):
expr = describe_token_expr(expr)
if self.current.type is TOKEN_EOF:
raise TemplateSyntaxError('unexpected end of template, expected %r.' % expr, self.current.lineno, self.name, self.filename)
raise TemplateSyntaxError('expected token %r, got %r' % (expr, describe_token(self.current)), self.current.lineno, self.name, self.filename)
try:
return self.current
finally:
next(self)
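# A parsing-loop sketch built on TokenStream (the `stream` variable is a
# hypothetical instance; illustrative, not executed here):
#
#   if stream.skip_if('whitespace'):      # consume an optional token
#       pass
#   name_token = stream.expect('name')    # required token, else TemplateSyntaxError
#   if stream.look().test('comma'):       # peek one token ahead without consuming
#       stream.skip(1)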
def get_lexer(environment):
key = (environment.block_start_string,
environment.block_end_string,
environment.variable_start_string,
environment.variable_end_string,
environment.comment_start_string,
environment.comment_end_string,
environment.line_statement_prefix,
environment.line_comment_prefix,
environment.trim_blocks,
environment.newline_sequence)
lexer = _lexer_cache.get(key)
if lexer is None:
lexer = Lexer(environment)
_lexer_cache[key] = lexer
return lexer
class Lexer(object):
def __init__(self, environment):
c = lambda x: re.compile(x, re.M | re.S)
e = re.escape
tag_rules = [(whitespace_re, TOKEN_WHITESPACE, None),
(float_re, TOKEN_FLOAT, None),
(integer_re, TOKEN_INTEGER, None),
(name_re, TOKEN_NAME, None),
(string_re, TOKEN_STRING, None),
(operator_re, TOKEN_OPERATOR, None)]
root_tag_rules = compile_rules(environment)
block_suffix_re = environment.trim_blocks and '\\n?' or ''
self.newline_sequence = environment.newline_sequence
self.rules = {'root': [(c('(.*?)(?:%s)' % '|'.join(['(?P<raw_begin>(?:\\s*%s\\-|%s)\\s*raw\\s*(?:\\-%s\\s*|%s))' % (e(environment.block_start_string),
e(environment.block_start_string),
e(environment.block_end_string),
e(environment.block_end_string))] + [ '(?P<%s_begin>\\s*%s\\-|%s)' % (n, r, r) for n, r in root_tag_rules ])), (TOKEN_DATA, '#bygroup'), '#bygroup'), (c('.+'), TOKEN_DATA, None)],
TOKEN_COMMENT_BEGIN: [(c('(.*?)((?:\\-%s\\s*|%s)%s)' % (e(environment.comment_end_string), e(environment.comment_end_string), block_suffix_re)), (TOKEN_COMMENT, TOKEN_COMMENT_END), '#pop'), (c('(.)'), (Failure('Missing end of comment tag'),), None)],
TOKEN_BLOCK_BEGIN: [(c('(?:\\-%s\\s*|%s)%s' % (e(environment.block_end_string), e(environment.block_end_string), block_suffix_re)), TOKEN_BLOCK_END, '#pop')] + tag_rules,
TOKEN_VARIABLE_BEGIN: [(c('\\-%s\\s*|%s' % (e(environment.variable_end_string), e(environment.variable_end_string))), TOKEN_VARIABLE_END, '#pop')] + tag_rules,
TOKEN_RAW_BEGIN: [(c('(.*?)((?:\\s*%s\\-|%s)\\s*endraw\\s*(?:\\-%s\\s*|%s%s))' % (e(environment.block_start_string),
e(environment.block_start_string),
e(environment.block_end_string),
e(environment.block_end_string),
block_suffix_re)), (TOKEN_DATA, TOKEN_RAW_END), '#pop'), (c('(.)'), (Failure('Missing end of raw directive'),), None)],
TOKEN_LINESTATEMENT_BEGIN: [(c('\\s*(\\n|$)'), TOKEN_LINESTATEMENT_END, '#pop')] + tag_rules,
TOKEN_LINECOMMENT_BEGIN: [(c('(.*?)()(?=\\n|$)'), (TOKEN_LINECOMMENT, TOKEN_LINECOMMENT_END), '#pop')]}
def _normalize_newlines(self, value):
return newline_re.sub(self.newline_sequence, value)
def tokenize(self, source, name = None, filename = None, state = None):
stream = self.tokeniter(source, name, filename, state)
return TokenStream(self.wrap(stream, name, filename), name, filename)
def wrap(self, stream, name = None, filename = None):
for lineno, token, value in stream:
if token in ignored_tokens:
continue
elif token == 'linestatement_begin':
token = 'block_begin'
elif token == 'linestatement_end':
token = 'block_end'
elif token in ('raw_begin', 'raw_end'):
continue
elif token == 'data':
value = self._normalize_newlines(value)
elif token == 'keyword':
token = value
elif token == 'name':
value = str(value)
elif token == 'string':
try:
value = self._normalize_newlines(value[1:-1]).encode('ascii', 'backslashreplace').decode('unicode-escape')
except Exception as e:
msg = str(e).split(':')[-1].strip()
raise TemplateSyntaxError(msg, lineno, name, filename)
try:
value = str(value)
except UnicodeError:
pass
elif token == 'integer':
value = int(value)
elif token == 'float':
value = float(value)
elif token == 'operator':
token = operators[value]
yield Token(lineno, token, value)
def tokeniter(self, source, name, filename = None, state = None):
source = '\n'.join(unicode(source).splitlines())
pos = 0
lineno = 1
stack = ['root']
if state is not None and state != 'root':
stack.append(state + '_begin')
else:
state = 'root'
statetokens = self.rules[stack[-1]]
source_length = len(source)
balancing_stack = []
while 1:
for regex, tokens, new_state in statetokens:
m = regex.match(source, pos)
if m is None:
continue
if balancing_stack and tokens in ('variable_end', 'block_end', 'linestatement_end'):
continue
if isinstance(tokens, tuple):
for idx, token in enumerate(tokens):
if token.__class__ is Failure:
raise token(lineno, filename)
elif token == '#bygroup':
for key, value in m.groupdict().iteritems():
if value is not None:
yield (lineno, key, value)
lineno += value.count('\n')
break
else:
raise RuntimeError('%r wanted to resolve the token dynamically but no group matched' % regex)
else:
data = m.group(idx + 1)
if data or token not in ignore_if_empty:
yield (lineno, token, data)
lineno += data.count('\n')
else:
data = m.group()
if tokens == 'operator':
if data == '{':
balancing_stack.append('}')
elif data == '(':
balancing_stack.append(')')
elif data == '[':
balancing_stack.append(']')
elif data in ('}', ')', ']'):
if not balancing_stack:
raise TemplateSyntaxError("unexpected '%s'" % data, lineno, name, filename)
expected_op = balancing_stack.pop()
if expected_op != data:
raise TemplateSyntaxError("unexpected '%s', expected '%s'" % (data, expected_op), lineno, name, filename)
if data or tokens not in ignore_if_empty:
yield (lineno, tokens, data)
lineno += data.count('\n')
pos2 = m.end()
if new_state is not None:
if new_state == '#pop':
stack.pop()
elif new_state == '#bygroup':
for key, value in m.groupdict().iteritems():
if value is not None:
stack.append(key)
break
else:
raise RuntimeError('%r wanted to resolve the new state dynamically but no group matched' % regex)
else:
stack.append(new_state)
statetokens = self.rules[stack[-1]]
elif pos2 == pos:
raise RuntimeError('%r yielded empty string without stack change' % regex)
pos = pos2
break
else:
if pos >= source_length:
return
raise TemplateSyntaxError('unexpected char %r at %d' % (source[pos], pos), lineno, name, filename)
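# End-to-end sketch: in Jinja2 this lexer is normally reached through an
# Environment rather than used directly, roughly (assuming the standard
# jinja2 public API; illustrative, not executed here):
#
#   from jinja2 import Environment
#   env = Environment()
#   for lineno, token_type, value in env.lex(u'Hello {{ name }}!'):
#       print(lineno, token_type, value)   # e.g. (1, 'variable_begin', u'{{ ')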
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
19ec32991e20e05769722e0f500cec98f9848466
|
433a7d9cca052a12e5095d36dfdb5e7126668f2b
|
/backend/inscourse_23147/settings.py
|
9b86d22a2800d2d17d4f6d1faafb2803cc0f60c3
|
[] |
no_license
|
crowdbotics-apps/inscourse-23147
|
e60ee992bce724ff3867d49ed8700a0b32d8c6a8
|
e50764e04e71c0d29e5c3b8a905cdb153d6902a3
|
refs/heads/master
| 2023-01-23T10:54:04.697210
| 2020-12-03T03:45:16
| 2020-12-03T03:45:16
| 318,065,807
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,017
|
py
|
"""
Django settings for inscourse_23147 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
'users.apps.UsersConfig',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'rest_auth',
'rest_auth.registration',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
'django_extensions',
'drf_yasg',
'storages',
# start fcm_django push notifications
'fcm_django',
# end fcm_django push notifications
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'inscourse_23147.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'inscourse_23147.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
if env.str("DATABASE_URL", default=None):
DATABASES = {
'default': env.db()
}
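# A hedged example of the override above: django-environ's env.db() parses a
# single DATABASE_URL variable, so a deployment could set something like
#
#   DATABASE_URL=postgres://app_user:app_pass@db.example.com:5432/inscourse
#
# (host name and credentials here are illustrative placeholders, not project values).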
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
# Replace password reset serializer to fix 500 error
"PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
# Use custom serializer that has no username and matches web signup
"REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
USE_S3 = (
AWS_ACCESS_KEY_ID and
AWS_SECRET_ACCESS_KEY and
AWS_STORAGE_BUCKET_NAME and
AWS_STORAGE_REGION
)
if USE_S3:
AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
DEFAULT_FILE_STORAGE = env.str(
"DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
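# Sketch of the S3 toggle above (all values illustrative): media uploads go to S3
# only when AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_STORAGE_BUCKET_NAME and
# AWS_STORAGE_REGION are all non-empty, e.g.
#
#   AWS_STORAGE_BUCKET_NAME=inscourse-media
#   AWS_STORAGE_REGION=us-east-1
#
# otherwise files fall back to the local MEDIA_ROOT directory.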
# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {
"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")
}
# end fcm_django push notifications
# Swagger settings for api docs
SWAGGER_SETTINGS = {
"DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
# output email to console instead of sending
if not DEBUG:
logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
ccae046c0f8494eb003afba5478f44c47ba4bc01
|
9d7f5e04f951884880c4636dc95e4db53893fedf
|
/Tool/KFDataTool/Py/Cheetah/Compiler.py
|
c0ae90d092015e1cdcee17a59f809f13f6582109
|
[] |
no_license
|
Keyf0/KFData
|
78b93907f01a37a1f6ec978587f0280483911ad9
|
46146e5ee8540f0cba2d6cdd0c9ec4f6d69b8930
|
refs/heads/master
| 2022-03-24T19:13:21.353121
| 2019-11-29T18:25:44
| 2019-11-29T18:25:44
| 217,873,055
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 82,519
|
py
|
'''
Compiler classes for Cheetah:
ModuleCompiler aka 'Compiler'
ClassCompiler
MethodCompiler
If you are trying to grok this code, start with ModuleCompiler.__init__,
ModuleCompiler.compile, and ModuleCompiler.__getattr__.
'''
import sys
import os
import os.path
from os.path import getmtime
import re
import time
import random
import warnings
import copy
import codecs
from Cheetah.Version import Version, VersionTuple
from Cheetah.SettingsManager import SettingsManager
from Cheetah.Utils.Indenter import indentize # an undocumented preprocessor
from Cheetah import NameMapper
from Cheetah.Parser import Parser, ParseError, specialVarRE, \
STATIC_CACHE, REFRESH_CACHE, SET_GLOBAL, SET_MODULE, \
unicodeDirectiveRE, encodingDirectiveRE, escapedNewlineRE
from Cheetah.compat import PY2, string_type, unicode
from Cheetah.NameMapper import valueForName, valueFromSearchList, \
valueFromFrameOrSearchList
VFFSL = valueFromFrameOrSearchList
VFSL = valueFromSearchList
VFN = valueForName
currentTime = time.time
class Error(Exception):
pass
# Settings format: (key, default, docstring)
_DEFAULT_COMPILER_SETTINGS = [
('useNameMapper', True,
'Enable NameMapper for dotted notation and searchList support'),
('useSearchList', True,
"Enable the searchList, requires useNameMapper=True, "
"if disabled, first portion of the $variable is a global, builtin, "
"or local variable that doesn't need looking up in the searchList"),
('allowSearchListAsMethArg', True, ''),
('useAutocalling', True,
'Detect and call callable objects in searchList, '
'requires useNameMapper=True'),
('useStackFrames', True,
'Used for NameMapper.valueFromFrameOrSearchList '
'rather than NameMapper.valueFromSearchList'),
('useErrorCatcher', False,
'Turn on the #errorCatcher directive '
'for catching NameMapper errors, etc'),
('alwaysFilterNone', True, 'Filter out None prior to calling the #filter'),
('useFilters', True, 'If False, pass output through str()'),
('includeRawExprInFilterArgs', True, ''),
('useLegacyImportMode', True,
'All #import statements are relocated to the top '
'of the generated Python module'),
('prioritizeSearchListOverSelf', False,
"When iterating the searchList, "
"look into the searchList passed into the initializer "
"instead of Template members first"),
('autoAssignDummyTransactionToSelf', False, ''),
('useKWsDictArgForPassingTrans', True, ''),
('commentOffset', 1, ''),
('outputRowColComments', True, ''),
('includeBlockMarkers', False,
     'Wrap #blocks in a comment in the template\'s output'),
('blockMarkerStart', ('\n<!-- START BLOCK: ', ' -->\n'), ''),
('blockMarkerEnd', ('\n<!-- END BLOCK: ', ' -->\n'), ''),
('defDocStrMsg',
'Autogenerated by Cheetah: The Python-Powered Template Engine', ''),
('setup__str__method', False, ''),
('mainMethodName', 'respond', ''),
('mainMethodNameForSubclasses', 'writeBody', ''),
('indentationStep', ' ' * 4, ''),
('initialMethIndentLevel', 2, ''),
('monitorSrcFile', False, ''),
('outputMethodsBeforeAttributes', True, ''),
('addTimestampsToCompilerOutput', True, ''),
# Customizing the #extends directive
('autoImportForExtendsDirective', True, ''),
('handlerForExtendsDirective', None, ''),
('disabledDirectives', [],
'List of directive keys to disable (without starting "#")'),
('enabledDirectives', [],
'List of directive keys to enable (without starting "#")'),
('disabledDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('postparseDirectiveHooks', [], 'callable(parser, directiveKey)'),
('preparsePlaceholderHooks', [], 'callable(parser)'),
('postparsePlaceholderHooks', [], 'callable(parser)'),
('expressionFilterHooks', [],
'callable(parser, expr, exprType, rawExpr=None, startPos=None), '
'exprType is the name of the directive, "psp" or "placeholder" '
'The filters *must* return the expr or raise an expression, '
'they can modify the expr if needed'),
('templateMetaclass', None,
     'Strictly optional; will only work with new-style baseclasses'),
('i18NFunctionName', 'self.i18n', ''),
('cheetahVarStartToken', '$', ''),
('commentStartToken', '##', ''),
('multiLineCommentStartToken', '#*', ''),
('multiLineCommentEndToken', '*#', ''),
('gobbleWhitespaceAroundMultiLineComments', True, ''),
('directiveStartToken', '#', ''),
('directiveEndToken', '#', ''),
('allowWhitespaceAfterDirectiveStartToken', False, ''),
('PSPStartToken', '<%', ''),
('PSPEndToken', '%>', ''),
('EOLSlurpToken', '#', ''),
('gettextTokens', ["_", "N_", "ngettext"], ''),
('allowExpressionsInExtendsDirective', False, ''),
('allowEmptySingleLineMethods', False, ''),
('allowNestedDefScopes', True, ''),
('allowPlaceholderFilterArgs', True, ''),
('encoding', None,
'The encoding to read input files as (or None for ASCII)'),
]
DEFAULT_COMPILER_SETTINGS = \
dict([(v[0], v[1]) for v in _DEFAULT_COMPILER_SETTINGS])
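# Quick illustration of the settings table above (a sketch, not executed here):
#
#   assert DEFAULT_COMPILER_SETTINGS['cheetahVarStartToken'] == '$'
#   assert DEFAULT_COMPILER_SETTINGS['mainMethodName'] == 'respond'
#   # overriding e.g. {'directiveStartToken': '%'} changes the directive syntax
#   # the compiler accepts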
class GenUtils(object):
"""An abstract baseclass for the Compiler classes that provides methods that
perform generic utility functions or generate pieces of output code from
information passed in by the Parser baseclass. These methods don't do any
parsing themselves.
"""
def genTimeInterval(self, timeString):
# #@@ TR: need to add some error handling here
if timeString[-1] == 's':
interval = float(timeString[:-1])
elif timeString[-1] == 'm':
interval = float(timeString[:-1])*60 # noqa: E226,E501 missing whitespace around operator
elif timeString[-1] == 'h':
interval = float(timeString[:-1])*60*60 # noqa: E226,E501 missing whitespace around operator
elif timeString[-1] == 'd':
interval = float(timeString[:-1])*60*60*24 # noqa: E226,E501 missing whitespace around operator
elif timeString[-1] == 'w':
interval = float(timeString[:-1])*60*60*24*7 # noqa: E226,E501 missing whitespace around operator
else: # default to minutes
interval = float(timeString)*60 # noqa: E226,E501 missing whitespace around operator
return interval
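    # Illustrative conversions for genTimeInterval (a sketch, not executed here):
    #   '30s' -> 30.0     '2m' -> 120.0      '1h' -> 3600.0
    #   '1d' -> 86400.0   '2w' -> 1209600.0  '5' (no suffix) -> 300.0 (minutes)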
def genCacheInfo(self, cacheTokenParts):
"""Decipher a placeholder cachetoken
"""
cacheInfo = {}
if cacheTokenParts['REFRESH_CACHE']:
cacheInfo['type'] = REFRESH_CACHE
cacheInfo['interval'] = \
self.genTimeInterval(cacheTokenParts['interval'])
elif cacheTokenParts['STATIC_CACHE']:
cacheInfo['type'] = STATIC_CACHE
return cacheInfo # is empty if no cache
def genCacheInfoFromArgList(self, argList):
cacheInfo = {'type': REFRESH_CACHE}
for key, val in argList:
if val[0] in '"\'':
val = val[1:-1]
if key == 'timer':
key = 'interval'
val = self.genTimeInterval(val)
cacheInfo[key] = val
return cacheInfo
def genCheetahVar(self, nameChunks, plain=False):
if nameChunks[0][0] in self.setting('gettextTokens'):
self.addGetTextVar(nameChunks)
if self.setting('useNameMapper') and not plain:
return self.genNameMapperVar(nameChunks)
else:
return self.genPlainVar(nameChunks)
def addGetTextVar(self, nameChunks):
"""Output something that gettext can recognize.
This is a harmless side effect necessary to make gettext work when it
is scanning compiled templates for strings marked for translation.
@@TR: another marginally more efficient approach would be to put the
output in a dummy method that is never called.
"""
# @@TR: this should be in the compiler not here
self.addChunk("if False:")
self.indent()
self.addChunk(self.genPlainVar(nameChunks[:]))
self.dedent()
def genPlainVar(self, nameChunks):
"""Generate Python code for a Cheetah $var without using NameMapper
(Unified Dotted Notation with the SearchList).
"""
nameChunks.reverse()
chunk = nameChunks.pop()
pythonCode = chunk[0] + chunk[2]
while nameChunks:
chunk = nameChunks.pop()
pythonCode = (pythonCode + '.' + chunk[0] + chunk[2])
return pythonCode
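    # Example of genPlainVar output (a sketch): for the Cheetah var $a.b.c[1].d()
    # the parser hands over nameChunks such as
    #   [('a.b.c', True, '[1]'), ('d', False, '()')]
    # and this method simply rejoins them into the plain Python expression
    #   'a.b.c[1].d()'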
def genNameMapperVar(self, nameChunks):
"""Generate valid Python code for a Cheetah $var, using NameMapper
(Unified Dotted Notation with the SearchList).
nameChunks = list of var subcomponents represented as tuples
[ (name,useAC,remainderOfExpr),
]
where:
name = the dotted name base
useAC = where NameMapper should use autocalling on namemapperPart
remainderOfExpr = any arglist, index, or slice
If remainderOfExpr contains a call arglist (e.g. '(1234)') then useAC
is False, otherwise it defaults to True. It is overridden by the global
setting 'useAutocalling' if this setting is False.
EXAMPLE::
if the raw Cheetah Var is
$a.b.c[1].d().x.y.z
nameChunks is the list
[ ('a.b.c',True,'[1]'), # A
('d',False,'()'), # B
('x.y.z',True,''), # C
]
When this method is fed the list above it returns::
VFN(VFN(VFFSL(SL, 'a.b.c',True)[1], 'd',False)(), 'x.y.z',True)
which can be represented as::
VFN(B`, name=C[0], executeCallables=(useAC and C[1]))C[2]
where::
VFN = NameMapper.valueForName
VFFSL = NameMapper.valueFromFrameOrSearchList
# optionally used instead of VFFSL
VFSL = NameMapper.valueFromSearchList
SL = self.searchList()
useAC = self.setting('useAutocalling') # True in this example
A = ('a.b.c',True,'[1]')
B = ('d',False,'()')
C = ('x.y.z',True,'')
C` = VFN( VFN( VFFSL(SL, 'a.b.c',True)[1],
'd',False)(),
'x.y.z',True)
= VFN(B`, name='x.y.z', executeCallables=True)
B` = VFN(A`, name=B[0], executeCallables=(useAC and B[1]))B[2]
A` = VFFSL(SL, name=A[0], executeCallables=(useAC and A[1]))A[2]
Note, if the compiler setting useStackFrames=False (default is true)
then::
A` = VFSL([locals()] + SL + [globals(), builtin],
name=A[0], executeCallables=(useAC and A[1]))A[2]
This option allows Cheetah to be used with Psyco, which doesn't support
stack frame introspection.
"""
defaultUseAC = self.setting('useAutocalling')
useSearchList = self.setting('useSearchList')
nameChunks.reverse()
name, useAC, remainder = nameChunks.pop()
if not useSearchList:
firstDotIdx = name.find('.')
if firstDotIdx != -1 and firstDotIdx < len(name):
beforeFirstDot = name[:firstDotIdx]
afterDot = name[firstDotIdx+1:] # noqa: E226,E501 missing whitespace around operator
pythonCode = ('VFN(' + beforeFirstDot
+ ',"' + afterDot
+ '",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = name + remainder
elif self.setting('useStackFrames'):
pythonCode = ('VFFSL(SL,'
'"' + name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
else:
pythonCode = ('VFSL([locals()]+SL+[globals(), builtin],'
'"' + name + '",'
+ repr(defaultUseAC and useAC) + ')'
+ remainder)
##
while nameChunks:
name, useAC, remainder = nameChunks.pop()
pythonCode = ('VFN(' + pythonCode
+ ',"' + name
+ '",' + repr(defaultUseAC and useAC) + ')'
+ remainder)
return pythonCode
##################################################
# METHOD COMPILERS
class MethodCompiler(GenUtils):
def __init__(self, methodName, classCompiler,
initialMethodComment=None,
decorators=None):
self._settingsManager = classCompiler
self._classCompiler = classCompiler
self._moduleCompiler = classCompiler._moduleCompiler
self._methodName = methodName
self._initialMethodComment = initialMethodComment
self._setupState()
self._decorators = decorators or []
def setting(self, key):
return self._settingsManager.setting(key)
def _setupState(self):
self._indent = self.setting('indentationStep')
self._indentLev = self.setting('initialMethIndentLevel')
self._pendingStrConstChunks = []
self._methodSignature = None
self._methodDef = None
self._docStringLines = []
self._methodBodyChunks = []
self._cacheRegionsStack = []
self._callRegionsStack = []
self._captureRegionsStack = []
self._filterRegionsStack = []
self._isErrorCatcherOn = False
self._hasReturnStatement = False
self._isGenerator = False
def cleanupState(self):
"""Called by the containing class compiler instance
"""
pass
def methodName(self):
return self._methodName
def setMethodName(self, name):
self._methodName = name
# methods for managing indentation
def indentation(self):
return self._indent * self._indentLev
def indent(self):
self._indentLev += 1
def dedent(self):
if self._indentLev:
self._indentLev -= 1
else:
raise Error('Attempt to dedent when the indentLev is 0')
# methods for final code wrapping
def methodDef(self):
if self._methodDef:
return self._methodDef
else:
return self.wrapCode()
__str__ = methodDef
__unicode__ = methodDef
def wrapCode(self):
self.commitStrConst()
methodDefChunks = (
self.methodSignature(),
'\n',
self.docString(),
self.methodBody())
methodDef = ''.join(methodDefChunks)
self._methodDef = methodDef
return methodDef
def methodSignature(self):
return self._indent + self._methodSignature + ':'
def setMethodSignature(self, signature):
self._methodSignature = signature
def methodBody(self):
return ''.join(self._methodBodyChunks)
def docString(self):
if not self._docStringLines:
return ''
ind = self._indent*2 # noqa: E226 missing whitespace around operator
docStr = (ind + '"""\n' + ind
+ ('\n' + ind).join([ln.replace('"""', "'''")
for ln in self._docStringLines])
+ '\n' + ind + '"""\n')
return docStr
# methods for adding code
def addMethDocString(self, line):
self._docStringLines.append(line.replace('%', '%%'))
def addChunk(self, chunk):
self.commitStrConst()
chunk = "\n" + self.indentation() + chunk
self._methodBodyChunks.append(chunk)
def appendToPrevChunk(self, appendage):
self._methodBodyChunks[-1] = self._methodBodyChunks[-1] + appendage
def addWriteChunk(self, chunk):
self.addChunk('write(' + chunk + ')')
def addFilteredChunk(self, chunk, filterArgs=None,
rawExpr=None, lineCol=None):
if filterArgs is None:
filterArgs = ''
if self.setting('includeRawExprInFilterArgs') and rawExpr:
filterArgs += ', rawExpr=%s' % repr(rawExpr)
if self.setting('alwaysFilterNone'):
if rawExpr and rawExpr.find('\n') == -1 and \
rawExpr.find('\r') == -1:
self.addChunk("_v = %s # %r" % (chunk, rawExpr))
if lineCol:
self.appendToPrevChunk(' on line %s, col %s' % lineCol)
else:
self.addChunk("_v = %s" % chunk)
if self.setting('useFilters'):
self.addChunk("if _v is not None: write(_filter(_v%s))"
% filterArgs)
else:
self.addChunk("if _v is not None: write(str(_v))")
else:
if self.setting('useFilters'):
self.addChunk("write(_filter(%s%s))" % (chunk, filterArgs))
else:
self.addChunk("write(str(%s))" % chunk)
def _appendToPrevStrConst(self, strConst):
if self._pendingStrConstChunks:
self._pendingStrConstChunks.append(strConst)
else:
self._pendingStrConstChunks = [strConst]
def commitStrConst(self):
"""Add the code for outputting the pending strConst without chopping off
any whitespace from it.
"""
if not self._pendingStrConstChunks:
return
strConst = ''.join(self._pendingStrConstChunks)
self._pendingStrConstChunks = []
if not strConst:
return
reprstr = repr(strConst)
i = 0
out = []
if reprstr.startswith('u'):
i = 1
out = ['u']
body = escapedNewlineRE.sub('\\1\n', reprstr[i+1:-1]) # noqa: E226,E501 missing whitespace around operator
if reprstr[i] == "'":
out.append("'''")
out.append(body)
out.append("'''")
else:
out.append('"""')
out.append(body)
out.append('"""')
self.addWriteChunk(''.join(out))
def handleWSBeforeDirective(self):
"""Truncate the pending strCont to the beginning of the current line.
"""
if self._pendingStrConstChunks:
src = self._pendingStrConstChunks[-1]
BOL = max(src.rfind('\n') + 1, src.rfind('\r') + 1, 0)
if BOL < len(src):
self._pendingStrConstChunks[-1] = src[:BOL]
def isErrorCatcherOn(self):
return self._isErrorCatcherOn
def turnErrorCatcherOn(self):
self._isErrorCatcherOn = True
def turnErrorCatcherOff(self):
self._isErrorCatcherOn = False
# @@TR: consider merging the next two methods into one
def addStrConst(self, strConst):
self._appendToPrevStrConst(strConst)
def addRawText(self, text):
self.addStrConst(text)
def addMethComment(self, comm):
offSet = self.setting('commentOffset')
self.addChunk('#' + ' '*offSet + comm) # noqa: E226,E501 missing whitespace around operator
def addPlaceholder(self, expr, filterArgs, rawPlaceholder,
cacheTokenParts, lineCol,
silentMode=False):
cacheInfo = self.genCacheInfo(cacheTokenParts)
if cacheInfo:
cacheInfo['ID'] = repr(rawPlaceholder)[1:-1]
self.startCacheRegion(cacheInfo, lineCol,
rawPlaceholder=rawPlaceholder)
if self.isErrorCatcherOn():
methodName = self._classCompiler.addErrorCatcherCall(
expr, rawCode=rawPlaceholder, lineCol=lineCol)
expr = 'self.' + methodName + '(localsDict=locals())'
if silentMode:
self.addChunk('try:')
self.indent()
self.addFilteredChunk(expr, filterArgs, rawPlaceholder,
lineCol=lineCol)
self.dedent()
self.addChunk('except NotFound: pass')
else:
self.addFilteredChunk(expr, filterArgs, rawPlaceholder,
lineCol=lineCol)
if self.setting('outputRowColComments'):
self.appendToPrevChunk(' # from line %s, col %s' % lineCol + '.')
if cacheInfo:
self.endCacheRegion()
def addSilent(self, expr):
self.addChunk(expr)
def addEcho(self, expr, rawExpr=None):
self.addFilteredChunk(expr, rawExpr=rawExpr)
def addSet(self, expr, exprComponents, setStyle):
if setStyle is SET_GLOBAL:
(LVALUE, OP, RVALUE) = (exprComponents.LVALUE,
exprComponents.OP,
exprComponents.RVALUE)
# we need to split the LVALUE to deal with globalSetVars
splitPos1 = LVALUE.find('.')
splitPos2 = LVALUE.find('[')
if splitPos1 > 0 and splitPos2 == -1:
splitPos = splitPos1
elif splitPos1 > 0 and splitPos1 < max(splitPos2, 0):
splitPos = splitPos1
else:
splitPos = splitPos2
if splitPos > 0:
primary = LVALUE[:splitPos]
secondary = LVALUE[splitPos:]
else:
primary = LVALUE
secondary = ''
LVALUE = \
'self._CHEETAH__globalSetVars["' + primary + '"]' + secondary
expr = LVALUE + ' ' + OP + ' ' + RVALUE.strip()
if setStyle is SET_MODULE:
self._moduleCompiler.addModuleGlobal(expr)
else:
self.addChunk(expr)
def addInclude(self, sourceExpr, includeFrom, isRaw):
self.addChunk('self._handleCheetahInclude(' + sourceExpr
+ ', trans=trans, '
+ 'includeFrom="' + includeFrom + '", raw='
+ repr(isRaw) + ')')
def addWhile(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addFor(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addRepeat(self, expr, lineCol=None):
# the _repeatCount stuff here allows nesting of #repeat directives
self._repeatCount = getattr(self, "_repeatCount", -1) + 1
self.addFor('for __i%s in range(%s)'
% (self._repeatCount, expr),
lineCol=lineCol)
def addIndentingDirective(self, expr, lineCol=None):
if expr and not expr[-1] == ':':
expr = expr + ':'
self.addChunk(expr)
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'
% lineCol)
self.indent()
def addReIndentingDirective(self, expr, dedent=True, lineCol=None):
self.commitStrConst()
if dedent:
self.dedent()
if not expr[-1] == ':':
expr = expr + ':'
self.addChunk(expr)
if lineCol:
self.appendToPrevChunk(' # generated from line %s, col %s'
% lineCol)
self.indent()
def addIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addOneLineIf(self, expr, lineCol=None):
"""For a full #if ... #end if directive
"""
self.addIndentingDirective(expr, lineCol=lineCol)
def addTernaryExpr(self, conditionExpr, trueExpr, falseExpr, lineCol=None):
"""For a single-lie #if ... then .... else ... directive
<condition> then <trueExpr> else <falseExpr>
"""
self.addIndentingDirective(conditionExpr, lineCol=lineCol)
self.addFilteredChunk(trueExpr)
self.dedent()
self.addIndentingDirective('else')
self.addFilteredChunk(falseExpr)
self.dedent()
def addElse(self, expr, dedent=True, lineCol=None):
expr = re.sub(r'else[ \f\t]+if', 'elif', expr)
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addElif(self, expr, dedent=True, lineCol=None):
self.addElse(expr, dedent=dedent, lineCol=lineCol)
def addUnless(self, expr, lineCol=None):
self.addIf('if not (' + expr + ')')
def addClosure(self, functionName, argsList, parserComment):
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if arg[1] is not None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
signature = \
"def " + functionName + "(" + ','.join(argStringChunks) + "):"
self.addIndentingDirective(signature)
self.addChunk('#' + parserComment)
def addTry(self, expr, lineCol=None):
self.addIndentingDirective(expr, lineCol=lineCol)
def addExcept(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addFinally(self, expr, dedent=True, lineCol=None):
self.addReIndentingDirective(expr, dedent=dedent, lineCol=lineCol)
def addReturn(self, expr):
assert not self._isGenerator
self.addChunk(expr)
self._hasReturnStatement = True
def addYield(self, expr):
assert not self._hasReturnStatement
self._isGenerator = True
if expr.replace('yield', '').strip():
self.addChunk(expr)
else:
self.addChunk('if _dummyTrans:')
self.indent()
self.addChunk('yield trans.response().getvalue()')
self.addChunk('trans = DummyTransaction()')
self.addChunk('write = trans.response().write')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk('raise TypeError('
'"This method cannot be called with a trans arg")')
self.dedent()
def addPass(self, expr):
self.addChunk(expr)
def addDel(self, expr):
self.addChunk(expr)
def addAssert(self, expr):
self.addChunk(expr)
def addRaise(self, expr):
self.addChunk(expr)
def addBreak(self, expr):
self.addChunk(expr)
def addContinue(self, expr):
self.addChunk(expr)
def addPSP(self, PSP):
self.commitStrConst()
autoIndent = False
if PSP[0] == '=':
PSP = PSP[1:]
if PSP:
self.addWriteChunk('_filter(' + PSP + ')')
return
elif PSP.lower() == 'end':
self.dedent()
return
elif PSP[-1] == '$':
autoIndent = True
PSP = PSP[:-1]
elif PSP[-1] == ':':
autoIndent = True
for line in PSP.splitlines():
self.addChunk(line)
if autoIndent:
self.indent()
def nextCacheID(self):
return ('_' + str(random.randrange(100, 999))
+ str(random.randrange(10000, 99999)))
def startCacheRegion(self, cacheInfo, lineCol, rawPlaceholder=None):
# @@TR: we should add some runtime logging to this
ID = self.nextCacheID()
interval = cacheInfo.get('interval', None)
test = cacheInfo.get('test', None)
customID = cacheInfo.get('id', None)
if customID:
ID = customID
varyBy = cacheInfo.get('varyBy', repr(ID))
self._cacheRegionsStack.append(ID) # attrib of current methodCompiler
# @@TR: add this to a special class var as well
self.addChunk('')
self.addChunk('## START CACHE REGION: ID=' + ID
+ '. line %s, col %s' % lineCol + ' in the source.')
self.addChunk('_RECACHE_%(ID)s = False' % locals())
self.addChunk('_cacheRegion_%(ID)s = self.getCacheRegion(regionID='
% locals()
+ repr(ID)
+ ', cacheInfo=%r' % cacheInfo
+ ')')
self.addChunk('if _cacheRegion_%(ID)s.isNew():' % locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True' % locals())
self.dedent()
self.addChunk('_cacheItem_%(ID)s = _cacheRegion_%(ID)s.getCacheItem('
% locals()
+ varyBy + ')')
self.addChunk('if _cacheItem_%(ID)s.hasExpired():' % locals())
self.indent()
self.addChunk('_RECACHE_%(ID)s = True' % locals())
self.dedent()
if test:
self.addChunk('if ' + test + ':')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True' % locals())
self.dedent()
self.addChunk(
'if (not _RECACHE_%(ID)s) and _cacheItem_%(ID)s.getRefreshTime():'
% locals())
self.indent()
self.addChunk('try:')
self.indent()
self.addChunk('_output = _cacheItem_%(ID)s.renderOutput()' % locals())
self.dedent()
self.addChunk('except KeyError:')
self.indent()
self.addChunk('_RECACHE_%(ID)s = True' % locals())
self.dedent()
self.addChunk('else:')
self.indent()
self.addWriteChunk('_output')
self.addChunk('del _output')
self.dedent()
self.dedent()
self.addChunk(
'if _RECACHE_%(ID)s or not _cacheItem_%(ID)s.getRefreshTime():'
% locals())
self.indent()
self.addChunk('_orig_trans%(ID)s = trans' % locals())
self.addChunk('trans = _cacheCollector_%(ID)s = DummyTransaction()'
% locals())
self.addChunk('write = _cacheCollector_%(ID)s.response().write'
% locals())
if interval:
self.addChunk(("_cacheItem_%(ID)s.setExpiryTime(currentTime() +"
% locals())
+ str(interval) + ")")
def endCacheRegion(self):
ID = self._cacheRegionsStack.pop()
self.addChunk('trans = _orig_trans%(ID)s' % locals())
self.addChunk('write = trans.response().write')
self.addChunk(
'_cacheData = _cacheCollector_%(ID)s.response().getvalue()'
% locals())
self.addChunk('_cacheItem_%(ID)s.setData(_cacheData)' % locals())
self.addWriteChunk('_cacheData')
self.addChunk('del _cacheData')
self.addChunk('del _cacheCollector_%(ID)s' % locals())
self.addChunk('del _orig_trans%(ID)s' % locals())
self.dedent()
self.addChunk('## END CACHE REGION: ' + ID)
self.addChunk('')
def nextCallRegionID(self):
return self.nextCacheID()
def startCallRegion(self, functionName, args, lineCol, regionTitle='CALL'):
class CallDetails(object):
pass
callDetails = CallDetails()
callDetails.ID = ID = self.nextCallRegionID()
callDetails.functionName = functionName
callDetails.args = args
callDetails.lineCol = lineCol
callDetails.usesKeywordArgs = False
# attrib of current methodCompiler
self._callRegionsStack.append((ID, callDetails))
self.addChunk('## START %(regionTitle)s REGION: ' % locals() + ID
+ ' of ' + functionName
+ ' at line %s, col %s' % lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans' % locals())
self.addChunk('_wasBuffering%(ID)s = self._CHEETAH__isBuffering'
% locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'
% locals())
self.addChunk('write = _callCollector%(ID)s.response().write'
% locals())
def setCallArg(self, argName, lineCol):
ID, callDetails = self._callRegionsStack[-1]
argName = str(argName)
if callDetails.usesKeywordArgs:
self._endCallArg()
else:
callDetails.usesKeywordArgs = True
self.addChunk('_callKws%(ID)s = {}' % locals())
self.addChunk('_currentCallArgname%(ID)s = %(argName)r' % locals())
callDetails.currentArgname = argName
def _endCallArg(self):
ID, callDetails = self._callRegionsStack[-1]
currCallArg = callDetails.currentArgname
self.addChunk('_callKws%(ID)s[%(currCallArg)r] ='
' _callCollector%(ID)s.response().getvalue()'
% locals())
self.addChunk('del _callCollector%(ID)s' % locals())
self.addChunk('trans = _callCollector%(ID)s = DummyTransaction()'
% locals())
self.addChunk('write = _callCollector%(ID)s.response().write'
% locals())
def endCallRegion(self, regionTitle='CALL'):
ID, callDetails = self._callRegionsStack[-1]
functionName, initialKwArgs, lineCol = (
callDetails.functionName, callDetails.args, callDetails.lineCol)
def reset(ID=ID):
self.addChunk('trans = _orig_trans%(ID)s' % locals())
self.addChunk('write = trans.response().write')
self.addChunk('self._CHEETAH__isBuffering = _wasBuffering%(ID)s '
% locals())
self.addChunk('del _wasBuffering%(ID)s' % locals())
self.addChunk('del _orig_trans%(ID)s' % locals())
if not callDetails.usesKeywordArgs:
reset()
self.addChunk(
'_callArgVal%(ID)s = '
'_callCollector%(ID)s.response().getvalue()' % locals())
self.addChunk('del _callCollector%(ID)s' % locals())
if initialKwArgs:
initialKwArgs = ', ' + initialKwArgs
self.addFilteredChunk(
'%(functionName)s(_callArgVal%(ID)s%(initialKwArgs)s)'
% locals())
self.addChunk('del _callArgVal%(ID)s' % locals())
else:
if initialKwArgs:
initialKwArgs = initialKwArgs + ', '
self._endCallArg()
reset()
self.addFilteredChunk(
'%(functionName)s(%(initialKwArgs)s**_callKws%(ID)s)'
% locals())
self.addChunk('del _callKws%(ID)s' % locals())
self.addChunk('## END %(regionTitle)s REGION: ' % locals() + ID
+ ' of ' + functionName
+ ' at line %s, col %s' % lineCol + ' in the source.')
self.addChunk('')
self._callRegionsStack.pop() # attrib of current methodCompiler
def nextCaptureRegionID(self):
return self.nextCacheID()
def startCaptureRegion(self, assignTo, lineCol):
class CaptureDetails:
pass
captureDetails = CaptureDetails()
captureDetails.ID = ID = self.nextCaptureRegionID()
captureDetails.assignTo = assignTo
captureDetails.lineCol = lineCol
# attrib of current methodCompiler
self._captureRegionsStack.append((ID, captureDetails))
self.addChunk('## START CAPTURE REGION: ' + ID + ' ' + assignTo
+ ' at line %s, col %s' % lineCol + ' in the source.')
self.addChunk('_orig_trans%(ID)s = trans' % locals())
self.addChunk(
'_wasBuffering%(ID)s = self._CHEETAH__isBuffering' % locals())
self.addChunk('self._CHEETAH__isBuffering = True')
self.addChunk(
'trans = _captureCollector%(ID)s = DummyTransaction()' % locals())
self.addChunk(
'write = _captureCollector%(ID)s.response().write' % locals())
def endCaptureRegion(self):
ID, captureDetails = self._captureRegionsStack.pop()
assignTo, lineCol = (captureDetails.assignTo, captureDetails.lineCol)
self.addChunk('trans = _orig_trans%(ID)s' % locals())
self.addChunk('write = trans.response().write')
self.addChunk(
'self._CHEETAH__isBuffering = _wasBuffering%(ID)s ' % locals())
self.addChunk(
'%(assignTo)s = _captureCollector%(ID)s.response().getvalue()'
% locals())
self.addChunk('del _orig_trans%(ID)s' % locals())
self.addChunk('del _captureCollector%(ID)s' % locals())
self.addChunk('del _wasBuffering%(ID)s' % locals())
def setErrorCatcher(self, errorCatcherName):
self.turnErrorCatcherOn()
self.addChunk(
'if "' + errorCatcherName + '" in self._CHEETAH__errorCatchers:')
self.indent()
self.addChunk(
'self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
+ errorCatcherName + '"]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk(
'self._CHEETAH__errorCatcher = self._CHEETAH__errorCatchers["'
+ errorCatcherName + '"] = ErrorCatchers.'
+ errorCatcherName + '(self)'
)
self.dedent()
def nextFilterRegionID(self):
return self.nextCacheID()
def setTransform(self, transformer, isKlass):
self.addChunk('trans = TransformerTransaction()')
self.addChunk('trans._response = trans.response()')
self.addChunk('trans._response._filter = %s' % transformer)
self.addChunk('write = trans._response.write')
def setFilter(self, theFilter, isKlass):
class FilterDetails:
pass
filterDetails = FilterDetails()
filterDetails.ID = ID = self.nextFilterRegionID()
filterDetails.theFilter = theFilter
filterDetails.isKlass = isKlass
# attrib of current methodCompiler
self._filterRegionsStack.append((ID, filterDetails))
self.addChunk('_orig_filter%(ID)s = _filter' % locals())
if isKlass:
self.addChunk(
'_filter = self._CHEETAH__currentFilter = '
+ theFilter.strip() + '(self).filter')
else:
if theFilter.lower() == 'none':
self.addChunk('_filter = self._CHEETAH__initialFilter')
else:
# is string representing the name of a builtin filter
self.addChunk('filterName = ' + repr(theFilter))
self.addChunk(
'if "' + theFilter + '" in self._CHEETAH__filters:')
self.indent()
self.addChunk(
'_filter = self._CHEETAH__currentFilter = '
'self._CHEETAH__filters[filterName]')
self.dedent()
self.addChunk('else:')
self.indent()
self.addChunk(
'_filter = self._CHEETAH__currentFilter'
+ ' = \\\n\t\t\tself._CHEETAH__filters[filterName] = '
+ 'getattr(self._CHEETAH__filtersLib, filterName)'
+ '(self).filter')
self.dedent()
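# Illustrative sketch of what the branches above generate for the #filter
# directive (hypothetical template input):
#   #filter WebSafe   -> _filter = self._CHEETAH__currentFilter is rebound to the
#                        WebSafe filter looked up in (or added to) self._CHEETAH__filters
#   #filter None      -> _filter falls back to self._CHEETAH__initialFilter
# closeFilterBlock() below restores the filter that was active before the block.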
def closeFilterBlock(self):
ID, filterDetails = self._filterRegionsStack.pop()
# self.addChunk('_filter = self._CHEETAH__initialFilter')
# self.addChunk('_filter = _orig_filter%(ID)s'%locals())
self.addChunk(
'_filter = self._CHEETAH__currentFilter = _orig_filter%(ID)s'
% locals())
class AutoMethodCompiler(MethodCompiler):
def _setupState(self):
MethodCompiler._setupState(self)
self._argStringList = [("self", None)]
self._streamingEnabled = True
self._isClassMethod = None
self._isStaticMethod = None
def _useKWsDictArgForPassingTrans(self):
alreadyHasTransArg = [
argname for argname, defval in self._argStringList
if argname == 'trans']
return (self.methodName() != 'respond'
and not alreadyHasTransArg
and self.setting('useKWsDictArgForPassingTrans'))
def isClassMethod(self):
if self._isClassMethod is None:
self._isClassMethod = '@classmethod' in self._decorators
return self._isClassMethod
def isStaticMethod(self):
if self._isStaticMethod is None:
self._isStaticMethod = '@staticmethod' in self._decorators
return self._isStaticMethod
def cleanupState(self):
MethodCompiler.cleanupState(self)
self.commitStrConst()
if self._cacheRegionsStack:
self.endCacheRegion()
if self._callRegionsStack:
self.endCallRegion()
if self._streamingEnabled:
kwargsName = None
positionalArgsListName = None
for argname, defval in self._argStringList:
if argname.strip().startswith('**'):
kwargsName = argname.strip().replace('**', '')
break
elif argname.strip().startswith('*'):
positionalArgsListName = argname.strip().replace('*', '')
if not kwargsName and self._useKWsDictArgForPassingTrans():
kwargsName = 'KWS'
self.addMethArg('**KWS', None)
self._kwargsName = kwargsName
if not self._useKWsDictArgForPassingTrans():
if not kwargsName and not positionalArgsListName:
self.addMethArg('trans', 'None')
else:
self._streamingEnabled = False
self._indentLev = self.setting('initialMethIndentLevel')
mainBodyChunks = self._methodBodyChunks
self._methodBodyChunks = []
self._addAutoSetupCode()
self._methodBodyChunks.extend(mainBodyChunks)
self._addAutoCleanupCode()
def _addAutoSetupCode(self):
if self._initialMethodComment:
self.addChunk(self._initialMethodComment)
if self._streamingEnabled and \
not self.isClassMethod() and not self.isStaticMethod():
if self._useKWsDictArgForPassingTrans() and self._kwargsName:
self.addChunk('trans = %s.get("trans")' % self._kwargsName)
self.addChunk('if (not trans and not self._CHEETAH__isBuffering'
' and not callable(self.transaction)):')
self.indent()
self.addChunk('trans = self.transaction'
' # is None unless self.awake() was called')
self.dedent()
self.addChunk('if not trans:')
self.indent()
self.addChunk('trans = DummyTransaction()')
if self.setting('autoAssignDummyTransactionToSelf'):
self.addChunk('self.transaction = trans')
self.addChunk('_dummyTrans = True')
self.dedent()
self.addChunk('else: _dummyTrans = False')
else:
self.addChunk('trans = DummyTransaction()')
self.addChunk('_dummyTrans = True')
self.addChunk('write = trans.response().write')
if self.setting('useNameMapper'):
argNames = [arg[0] for arg in self._argStringList]
allowSearchListAsMethArg = self.setting('allowSearchListAsMethArg')
if allowSearchListAsMethArg and 'SL' in argNames:
pass
elif allowSearchListAsMethArg and 'searchList' in argNames:
self.addChunk('SL = searchList')
elif not self.isClassMethod() and not self.isStaticMethod():
self.addChunk('SL = self._CHEETAH__searchList')
else:
self.addChunk('SL = [KWS]')
if self.setting('useFilters'):
if self.isClassMethod() or self.isStaticMethod():
self.addChunk('_filter = lambda x, **kwargs: unicode(x)')
else:
self.addChunk('_filter = self._CHEETAH__currentFilter')
self.addChunk('')
self.addChunk("#"*40) # noqa: E226 missing whitespace around operator
self.addChunk('## START - generated method body')
self.addChunk('')
def _addAutoCleanupCode(self):
self.addChunk('')
self.addChunk("#"*40) # noqa: E226 missing whitespace around operator
self.addChunk('## END - generated method body')
self.addChunk('')
if not self._isGenerator:
self.addStop()
self.addChunk('')
def addStop(self, expr=None):
self.addChunk(
'return _dummyTrans and trans.response().getvalue() or ""')
def addMethArg(self, name, defVal=None):
self._argStringList.append((name, defVal))
def methodSignature(self):
argStringChunks = []
for arg in self._argStringList:
chunk = arg[0]
if chunk == 'self' and self.isClassMethod():
chunk = 'cls'
if chunk == 'self' and self.isStaticMethod():
# Skip the "self" method for @staticmethod decorators
continue
if arg[1] is not None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = (', ').join(argStringChunks)
output = []
if self._decorators:
output.append(''.join([self._indent + decorator + '\n'
for decorator in self._decorators]))
output.append(self._indent + "def "
+ self.methodName() + "("
+ argString + "):\n\n")
return ''.join(output)
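# Illustrative example of the signature this builder produces for the main
# template method (typical output, not taken from a real run):
#   def respond(self, trans=None):
# Any accumulated #@decorator lines are emitted above the def; 'self' becomes
# 'cls' under @classmethod and is dropped entirely under @staticmethod.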
##################################################
# CLASS COMPILERS
_initMethod_initCheetah = """\
if not self._CHEETAH__instanceInitialized:
cheetahKWArgs = {}
allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
for k,v in KWs.items():
if k in allowedKWs: cheetahKWArgs[k] = v
self._initCheetahInstance(**cheetahKWArgs)
""".replace('\n', '\n' + ' '*8) # noqa: E226,E501 missing whitespace around operator
class ClassCompiler(GenUtils):
methodCompilerClass = AutoMethodCompiler
methodCompilerClassForInit = MethodCompiler
def __init__(self, className, mainMethodName='respond',
moduleCompiler=None,
fileName=None,
settingsManager=None):
self._settingsManager = settingsManager
self._fileName = fileName
self._className = className
self._moduleCompiler = moduleCompiler
self._mainMethodName = mainMethodName
self._setupState()
methodCompiler = self._spawnMethodCompiler(
mainMethodName,
initialMethodComment='## CHEETAH: main method '
'generated for this template')
self._setActiveMethodCompiler(methodCompiler)
if fileName and self.setting('monitorSrcFile'):
self._addSourceFileMonitoring(fileName)
def setting(self, key):
return self._settingsManager.setting(key)
def __getattr__(self, name):
"""Provide access to the methods and attributes of the MethodCompiler
at the top of the activeMethods stack: one-way namespace sharing
WARNING: Use .setMethods to assign the attributes of the MethodCompiler
from the methods of this class, or you will be assigning to
attributes of this object instead.
"""
if name in self.__dict__:
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeMethodsList and \
hasattr(self._activeMethodsList[-1], name):
return getattr(self._activeMethodsList[-1], name)
else:
raise AttributeError(name)
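# Illustrative sketch of the delegation implemented by __getattr__ above
# (the chunk text is hypothetical):
#   classCompiler.addChunk('x = 1')   # resolved on the active MethodCompiler
#   classCompiler.indent()            # likewise forwarded to the top of the stack
# This is also why state must be assigned through explicit setter methods rather
# than by plain attribute assignment on the ClassCompiler itself.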
def _setupState(self):
self._classDef = None
self._decoratorsForNextMethod = []
self._activeMethodsList = [] # stack while parsing/generating
self._finishedMethodsList = []  # stored in the order they are finished
self._methodsIndex = {}  # indexed by method name
self._baseClass = 'Template'
self._classDocStringLines = []
# printed after methods in the gen class def:
self._generatedAttribs = ['_CHEETAH__instanceInitialized = False']
self._generatedAttribs.append('_CHEETAH_version = __CHEETAH_version__')
self._generatedAttribs.append(
'_CHEETAH_versionTuple = __CHEETAH_versionTuple__')
if self.setting('addTimestampsToCompilerOutput'):
self._generatedAttribs.append(
'_CHEETAH_genTime = __CHEETAH_genTime__')
self._generatedAttribs.append(
'_CHEETAH_genTimestamp = __CHEETAH_genTimestamp__')
self._generatedAttribs.append('_CHEETAH_src = __CHEETAH_src__')
self._generatedAttribs.append(
'_CHEETAH_srcLastModified = __CHEETAH_srcLastModified__')
if self.setting('templateMetaclass'):
self._generatedAttribs.append(
'__metaclass__ = ' + self.setting('templateMetaclass'))
self._initMethChunks = []
self._blockMetaData = {}
self._errorCatcherCount = 0
self._placeholderToErrorCatcherMap = {}
def cleanupState(self):
while self._activeMethodsList:
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
self._setupInitMethod()
if self._mainMethodName == 'respond':
if self.setting('setup__str__method'):
self._generatedAttribs.append(
'def __str__(self): return self.respond()')
self.addAttribute(
'_mainCheetahMethod_for_' + self._className
+ ' = ' + repr(self._mainMethodName))
def _setupInitMethod(self):
__init__ = self._spawnMethodCompiler(
'__init__', klass=self.methodCompilerClassForInit)
__init__.setMethodSignature("def __init__(self, *args, **KWs)")
__init__.addChunk(
'super(%s, self).__init__(*args, **KWs)' % self._className)
__init__.addChunk(
_initMethod_initCheetah % {'className': self._className})
for chunk in self._initMethChunks:
__init__.addChunk(chunk)
__init__.cleanupState()
self._swallowMethodCompiler(__init__, pos=0)
def _addSourceFileMonitoring(self, fileName):
# @@TR: this stuff needs auditing for Cheetah 2.0
# the first bit is added to init
self.addChunkToInit('self._filePath = ' + repr(fileName))
self.addChunkToInit('self._fileMtime = ' + str(getmtime(fileName)))
# the rest is added to the main output method of the class
# ('mainMethod')
self.addChunk(
'if exists(self._filePath) and '
+ 'getmtime(self._filePath) > self._fileMtime:')
self.indent()
self.addChunk(
'self._compile(file=self._filePath, moduleName='
+ self._className + ')')
self.addChunk(
'write(getattr(self, self._mainCheetahMethod_for_'
+ self._className + ')(trans=trans))')
self.addStop()
self.dedent()
def setClassName(self, name):
self._className = name
def className(self):
return self._className
def setBaseClass(self, baseClassName):
self._baseClass = baseClassName
def setMainMethodName(self, methodName):
if methodName == self._mainMethodName:
return
# change the name in the methodCompiler and add new reference
mainMethod = self._methodsIndex[self._mainMethodName]
mainMethod.setMethodName(methodName)
self._methodsIndex[methodName] = mainMethod
# make sure that fileUpdate code still works properly:
chunkToChange = (
'write(self.' + self._mainMethodName + '(trans=trans))')
chunks = mainMethod._methodBodyChunks
if chunkToChange in chunks:
for i in range(len(chunks)):
if chunks[i] == chunkToChange:
chunks[i] = ('write(self.' + methodName + '(trans=trans))')
# get rid of the old reference and update self._mainMethodName
del self._methodsIndex[self._mainMethodName]
self._mainMethodName = methodName
def setMainMethodArgs(self, argsList):
mainMethodCompiler = self._methodsIndex[self._mainMethodName]
for argName, defVal in argsList:
mainMethodCompiler.addMethArg(argName, defVal)
def _spawnMethodCompiler(self, methodName, klass=None,
initialMethodComment=None):
if klass is None:
klass = self.methodCompilerClass
decorators = self._decoratorsForNextMethod or []
self._decoratorsForNextMethod = []
methodCompiler = klass(methodName, classCompiler=self,
decorators=decorators,
initialMethodComment=initialMethodComment)
self._methodsIndex[methodName] = methodCompiler
return methodCompiler
def _setActiveMethodCompiler(self, methodCompiler):
self._activeMethodsList.append(methodCompiler)
def _getActiveMethodCompiler(self):
return self._activeMethodsList[-1]
def _popActiveMethodCompiler(self):
return self._activeMethodsList.pop()
def _swallowMethodCompiler(self, methodCompiler, pos=None):
methodCompiler.cleanupState()
if pos is None:
self._finishedMethodsList.append(methodCompiler)
else:
self._finishedMethodsList.insert(pos, methodCompiler)
return methodCompiler
def startMethodDef(self, methodName, argsList, parserComment):
methodCompiler = self._spawnMethodCompiler(
methodName, initialMethodComment=parserComment)
self._setActiveMethodCompiler(methodCompiler)
for argName, defVal in argsList:
methodCompiler.addMethArg(argName, defVal)
def _finishedMethods(self):
return self._finishedMethodsList
def addDecorator(self, decoratorExpr):
"""Set the decorator to be used with the next method in the source.
See _spawnMethodCompiler() and MethodCompiler for the details of how
this is used.
"""
self._decoratorsForNextMethod.append(decoratorExpr)
def addClassDocString(self, line):
self._classDocStringLines.append(line.replace('%', '%%'))
def addChunkToInit(self, chunk):
self._initMethChunks.append(chunk)
def addAttribute(self, attribExpr):
# First test to make sure that the user hasn't used
# any fancy Cheetah syntax (placeholders, directives, etc.)
# inside the expression
if attribExpr.find('VFN(') != -1 or attribExpr.find('VFFSL(') != -1:
raise ParseError(
self,
'Invalid #attr directive. It should only contain '
+ 'simple Python literals.')
# now add the attribute
self._generatedAttribs.append(attribExpr)
def addSuper(self, argsList, parserComment=None):
className = self._className # self._baseClass
methodName = self._getActiveMethodCompiler().methodName()
argStringChunks = []
for arg in argsList:
chunk = arg[0]
if arg[1] is not None:
chunk += '=' + arg[1]
argStringChunks.append(chunk)
argString = ','.join(argStringChunks)
self.addFilteredChunk(
'super(%(className)s, self).%(methodName)s(%(argString)s)'
% locals())
def addErrorCatcherCall(self, codeChunk, rawCode='', lineCol=''):
if rawCode in self._placeholderToErrorCatcherMap:
methodName = self._placeholderToErrorCatcherMap[rawCode]
if not self.setting('outputRowColComments'):
self._methodsIndex[methodName].addMethDocString(
'plus at line %s, col %s' % lineCol)
return methodName
self._errorCatcherCount += 1
methodName = '__errorCatcher' + str(self._errorCatcherCount)
self._placeholderToErrorCatcherMap[rawCode] = methodName
catcherMeth = self._spawnMethodCompiler(
methodName,
klass=MethodCompiler,
initialMethodComment=(
'## CHEETAH: Generated from ' + rawCode
+ ' at line %s, col %s' % lineCol + '.')
)
catcherMeth.setMethodSignature(
'def ' + methodName
+ '(self, localsDict={})') # is this use of localsDict right?
catcherMeth.addChunk('try:')
catcherMeth.indent()
catcherMeth.addChunk(
"return eval('''" + codeChunk + "''', globals(), localsDict)")
catcherMeth.dedent()
catcherMeth.addChunk(
'except self._CHEETAH__errorCatcher.exceptions() as e:')
catcherMeth.indent()
catcherMeth.addChunk(
"return self._CHEETAH__errorCatcher.warn(exc_val=e, code= "
+ repr(codeChunk) + " , rawCode= "
+ repr(rawCode) + " , lineCol=" + str(lineCol) + ")")
catcherMeth.cleanupState()
self._swallowMethodCompiler(catcherMeth)
return methodName
def closeDef(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
self._swallowMethodCompiler(methCompiler)
def closeBlock(self):
self.commitStrConst()
methCompiler = self._popActiveMethodCompiler()
methodName = methCompiler.methodName()
if self.setting('includeBlockMarkers'):
endMarker = self.setting('blockMarkerEnd')
methCompiler.addStrConst(endMarker[0] + methodName + endMarker[1])
self._swallowMethodCompiler(methCompiler)
# metaData = self._blockMetaData[methodName]
# rawDirective = metaData['raw']
# lineCol = metaData['lineCol']
# insert the code to call the block, caching if #cache directive is on
codeChunk = 'self.' + methodName + '(trans=trans)'
self.addChunk(codeChunk)
# self.appendToPrevChunk(' # generated from ' + repr(rawDirective) )
# if self.setting('outputRowColComments'):
# self.appendToPrevChunk(' at line %s, col %s' % lineCol + '.')
# code wrapping methods
def classDef(self):
if self._classDef:
return self._classDef
else:
return self.wrapClassDef()
__str__ = classDef
__unicode__ = classDef
def wrapClassDef(self):
ind = self.setting('indentationStep')
classDefChunks = [self.classSignature(),
self.classDocstring(),
]
def addMethods():
classDefChunks.extend([
ind + '#'*50, # noqa: E226 missing whitespace around operator
ind + '## CHEETAH GENERATED METHODS',
'\n',
self.methodDefs(),
])
def addAttributes():
classDefChunks.extend([
ind + '#'*50, # noqa: E226 missing whitespace around operator
ind + '## CHEETAH GENERATED ATTRIBUTES',
'\n',
self.attributes(),
])
if self.setting('outputMethodsBeforeAttributes'):
addMethods()
addAttributes()
else:
addAttributes()
addMethods()
classDef = '\n'.join(classDefChunks)
self._classDef = classDef
return classDef
def classSignature(self):
return "class %s(%s):" % (self.className(), self._baseClass)
def classDocstring(self):
if not self._classDocStringLines:
return ''
ind = self.setting('indentationStep')
docStr = ('%(ind)s"""\n%(ind)s'
+ '\n%(ind)s'.join(self._classDocStringLines)
+ '\n%(ind)s"""\n'
) % {'ind': ind}
return docStr
def methodDefs(self):
methodDefs = [
methGen.methodDef() for methGen in self._finishedMethods()]
return '\n\n'.join(methodDefs)
def attributes(self):
try:
attribs = [self.setting('indentationStep') + str(attrib)
for attrib in self._generatedAttribs]
except UnicodeEncodeError:
attribs = [self.setting('indentationStep') + unicode(attrib)
for attrib in self._generatedAttribs]
return '\n\n'.join(attribs)
class AutoClassCompiler(ClassCompiler):
pass
##################################################
# MODULE COMPILERS
class ModuleCompiler(SettingsManager, GenUtils):
parserClass = Parser
classCompilerClass = AutoClassCompiler
def __init__(self, source=None, file=None,
moduleName='DynamicallyCompiledCheetahTemplate',
mainClassName=None, # string
mainMethodName=None, # string
baseclassName=None, # string
extraImportStatements=None, # list of strings
settings=None # dict
):
super(ModuleCompiler, self).__init__()
if settings:
self.updateSettings(settings)
# disable useStackFrames if the C version of NameMapper isn't compiled
# it's painfully slow in the Python version and bites Windows users all
# the time:
if not NameMapper.C_VERSION:
self.setSetting('useStackFrames', False)
self._compiled = False
self._moduleName = moduleName
if not mainClassName:
self._mainClassName = moduleName
else:
self._mainClassName = mainClassName
self._mainMethodNameArg = mainMethodName
if mainMethodName:
self.setSetting('mainMethodName', mainMethodName)
self._baseclassName = baseclassName
self._filePath = None
self._fileMtime = None
if source and file:
raise TypeError("Cannot compile from a source string AND file.")
elif isinstance(file, string_type): # it's a filename.
encoding = self.settings().get('encoding')
if encoding:
f = codecs.open(file, 'r', encoding=encoding)
else:
# if no encoding is specified, use the builtin open function
f = open(file, 'r')
source = f.read()
f.close()
self._filePath = file
self._fileMtime = os.path.getmtime(file)
elif hasattr(file, 'read'):
# Can't set filename or mtime -- they're not accessible
source = file.read()
elif file:
raise TypeError(
"'file' argument must be a filename string or file-like object"
)
if self._filePath:
self._fileDirName, self._fileBaseName = \
os.path.split(self._filePath)
self._fileBaseNameRoot, self._fileBaseNameExt = \
os.path.splitext(self._fileBaseName)
if not isinstance(source, string_type):
# By converting to unicode here we allow objects
# such as other Templates to be passed in
source = unicode(source)
# Handle the #indent directive by converting it to other directives.
# (Over the long term we'll make it a real directive.)
if source == "":
warnings.warn("You supplied an empty string for the source!", )
else:
unicodeMatch = unicodeDirectiveRE.search(source)
encodingMatch = encodingDirectiveRE.search(source)
if unicodeMatch:
if encodingMatch:
raise ParseError(
self, "#encoding and #unicode are mutually exclusive! "
"Use one or the other.")
source = unicodeDirectiveRE.sub('', source)
if isinstance(source, bytes):
encoding = unicodeMatch.group(1) or 'ascii'
source = source.decode(encoding)
elif encodingMatch:
encodings = encodingMatch.groups()
if len(encodings):
encoding = encodings[0]
if isinstance(source, bytes):
source = source.decode(encoding)
else:
source = eval(
repr(source).encode("ascii", "backslashreplace")
.decode(encoding))
else:
source = unicode(source)
if source.find('#indent') != -1: # @@TR: undocumented hack
source = indentize(source)
self._parser = self.parserClass(source, filename=self._filePath,
compiler=self)
self._setupCompilerState()
def __getattr__(self, name):
"""Provide one-way access to the methods and attributes of the
ClassCompiler, and thereby the MethodCompilers as well.
WARNING: Use .setMethods to assign the attributes of the ClassCompiler
from the methods of this class, or you will be assigning to
attributes of this object instead.
"""
if name in self.__dict__:
return self.__dict__[name]
elif hasattr(self.__class__, name):
return getattr(self.__class__, name)
elif self._activeClassesList and \
hasattr(self._activeClassesList[-1], name):
return getattr(self._activeClassesList[-1], name)
else:
raise AttributeError(name)
def _initializeSettings(self):
self.updateSettings(copy.deepcopy(DEFAULT_COMPILER_SETTINGS))
def _setupCompilerState(self):
self._activeClassesList = []
self._finishedClassesList = []  # listed in the order they are finished
self._finishedClassIndex = {}  # indexed by class name
self._moduleDef = None
self._moduleShBang = '#!/usr/bin/env python'
self._moduleEncoding = 'ascii'
self._moduleEncodingStr = ''
self._moduleHeaderLines = []
self._moduleDocStringLines = []
self._specialVars = {}
self._importStatements = [
"import sys",
"import os",
"import os.path",
'try:',
' import builtins as builtin',
'except ImportError:',
' import __builtin__ as builtin',
"from os.path import getmtime, exists",
"import time",
"import types",
"from Cheetah.Version import MinCompatibleVersion as "
"RequiredCheetahVersion",
"from Cheetah.Version import MinCompatibleVersionTuple "
"as RequiredCheetahVersionTuple",
"from Cheetah.Template import Template",
"from Cheetah.DummyTransaction import *",
"from Cheetah.NameMapper import NotFound, "
"valueForName, valueFromSearchList, valueFromFrameOrSearchList",
"from Cheetah.CacheRegion import CacheRegion",
"import Cheetah.Filters as Filters",
"import Cheetah.ErrorCatchers as ErrorCatchers",
"from Cheetah.compat import unicode",
]
self._importedVarNames = ['sys',
'os',
'os.path',
'time',
'types',
'Template',
'DummyTransaction',
'NotFound',
'Filters',
'ErrorCatchers',
'CacheRegion',
]
self._moduleConstants = [
"VFFSL=valueFromFrameOrSearchList",
"VFSL=valueFromSearchList",
"VFN=valueForName",
"currentTime=time.time",
]
def compile(self):
classCompiler = self._spawnClassCompiler(self._mainClassName)
if self._baseclassName:
classCompiler.setBaseClass(self._baseclassName)
self._addActiveClassCompiler(classCompiler)
self._parser.parse()
self._swallowClassCompiler(self._popActiveClassCompiler())
self._compiled = True
self._parser.cleanup()
def _spawnClassCompiler(self, className, klass=None):
if klass is None:
klass = self.classCompilerClass
classCompiler = klass(className,
moduleCompiler=self,
mainMethodName=self.setting('mainMethodName'),
fileName=self._filePath,
settingsManager=self,
)
return classCompiler
def _addActiveClassCompiler(self, classCompiler):
self._activeClassesList.append(classCompiler)
def _getActiveClassCompiler(self):
return self._activeClassesList[-1]
def _popActiveClassCompiler(self):
return self._activeClassesList.pop()
def _swallowClassCompiler(self, classCompiler):
classCompiler.cleanupState()
self._finishedClassesList.append(classCompiler)
self._finishedClassIndex[classCompiler.className()] = classCompiler
return classCompiler
def _finishedClasses(self):
return self._finishedClassesList
def importedVarNames(self):
return self._importedVarNames
def addImportedVarNames(self, varNames, raw_statement=None):
settings = self.settings()
if not varNames:
return
if not settings.get('useLegacyImportMode'):
if raw_statement and getattr(self, '_methodBodyChunks'):
self.addChunk(raw_statement)
else:
self._importedVarNames.extend(varNames)
# methods for adding stuff to the module and class definitions
def setBaseClass(self, baseClassName):
if self._mainMethodNameArg:
self.setMainMethodName(self._mainMethodNameArg)
else:
self.setMainMethodName(self.setting('mainMethodNameForSubclasses'))
if self.setting('handlerForExtendsDirective'):
handler = self.setting('handlerForExtendsDirective')
baseClassName = handler(compiler=self, baseClassName=baseClassName)
self._getActiveClassCompiler().setBaseClass(baseClassName)
elif (not self.setting('autoImportForExtendsDirective')
or baseClassName == 'object'
or baseClassName in self.importedVarNames()):
self._getActiveClassCompiler().setBaseClass(baseClassName)
# no need to import
else:
##################################################
# If the #extends directive contains a classname or modulename
# that isn't in self.importedVarNames() already,
# we assume that we need to add an implied
# 'from ModName import ClassName' where ModName == ClassName.
# - This is the case in WebKit servlet modules.
# - We also assume that the final . separates the classname
# from the module name.
# This might break if people do something really fancy
# with their dots and namespaces.
baseclasses = []
for klass in baseClassName.split(','):
klass = klass.strip()
chunks = klass.split('.')
if len(chunks) == 1:
baseclasses.append(klass)
if klass not in self.importedVarNames():
modName = klass
# we assume the class name to be the module name
# and that it's not a builtin:
importStatement = "from %s import %s" % (
modName, klass)
self.addImportStatement(importStatement)
self.addImportedVarNames((klass,))
else:
needToAddImport = True
modName = chunks[0]
for chunk in chunks[1:-1]:
if modName in self.importedVarNames():
needToAddImport = False
finalBaseClassName = klass.replace(modName + '.',
'')
baseclasses.append(finalBaseClassName)
break
else:
modName += '.' + chunk
if needToAddImport:
modName, finalClassName = (
'.'.join(chunks[:-1]), chunks[-1])
# if finalClassName != chunks[:-1][-1]:
if finalClassName != chunks[-2]:
# we assume the class name to be the module name
modName = '.'.join(chunks)
baseclasses.append(finalClassName)
importStatement = "from %s import %s" % (
modName, finalClassName)
self.addImportStatement(importStatement)
self.addImportedVarNames([finalClassName])
self._getActiveClassCompiler().setBaseClass(', '.join(baseclasses))
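# Illustrative example of the implied-import behaviour above (hypothetical
# base-class names, default settings with autoImportForExtendsDirective on):
#   #extends SiteBaseTemplate   -> emits "from SiteBaseTemplate import SiteBaseTemplate"
#   #extends pkg.mod.SiteBase   -> emits "from pkg.mod.SiteBase import SiteBase"
#                                  (the class is assumed to live in a module of the
#                                   same name, per the comment block above)
#   #extends object             -> no import is added at all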
def setCompilerSetting(self, key, valueExpr):
self.setSetting(key, eval(valueExpr))
self._parser.configureParser()
def setCompilerSettings(self, keywords, settingsStr):
KWs = keywords
if 'reset' in KWs:
# @@TR: this is actually caught by the parser at the moment.
# subject to change in the future
self._initializeSettings()
self._parser.configureParser()
return
elif 'python' in KWs:
settingsReader = self.updateSettingsFromPySrcStr
# this comes from SettingsManager
else:
# this comes from SettingsManager
settingsReader = self.updateSettingsFromConfigStr
settingsReader(settingsStr)
self._parser.configureParser()
def setShBang(self, shBang):
self._moduleShBang = shBang
def setModuleEncoding(self, encoding):
self._moduleEncoding = encoding
def getModuleEncoding(self):
return self._moduleEncoding
def addModuleHeader(self, line):
"""Adds a header comment to the top of the generated module.
"""
self._moduleHeaderLines.append(line)
def addModuleDocString(self, line):
"""Adds a line to the generated module docstring.
"""
self._moduleDocStringLines.append(line)
def addModuleGlobal(self, line):
"""Adds a line of global module code. It is inserted after the import
statements and Cheetah default module constants.
"""
self._moduleConstants.append(line)
def addSpecialVar(self, basename, contents, includeUnderscores=True):
"""Adds module __specialConstant__ to the module globals.
"""
name = includeUnderscores and '__' + basename + '__' or basename
self._specialVars[name] = contents.strip()
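# Illustrative example of how a special var surfaces in the generated module
# (the value is hypothetical):
#   self.addSpecialVar('CHEETAH_docstring', 'autogenerated module')
# is later rendered by specialVars() as
#   __CHEETAH_docstring__ = 'autogenerated module'
# while includeUnderscores=False would keep the bare name instead.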
def addImportStatement(self, impStatement):
settings = self.settings()
if not self._methodBodyChunks or settings.get('useLegacyImportMode'):
# In the case where we are importing inline
# in the middle of a source block
# we don't want to inadvertently import the module
# at the top of the file either
self._importStatements.append(impStatement)
# @@TR 2005-01-01: there's almost certainly a cleaner way to do this!
importVarNames = impStatement[
impStatement.find('import') + len('import'):].split(',')
# handle aliases
importVarNames = [var.split()[-1] for var in importVarNames]
importVarNames = [var for var in importVarNames if not var == '*']
# used by #extend for auto-imports
self.addImportedVarNames(importVarNames, raw_statement=impStatement)
def addAttribute(self, attribName, expr):
self._getActiveClassCompiler().addAttribute(attribName + ' =' + expr)
def addComment(self, comm):
if re.match(r'#+$', comm): # skip bar comments
return
specialVarMatch = specialVarRE.match(comm)
if specialVarMatch:
# @@TR: this is a bit hackish and is being replaced with
# #set module varName = ...
return self.addSpecialVar(specialVarMatch.group(1),
comm[specialVarMatch.end():])
elif comm.startswith('doc:'):
addLine = self.addMethDocString
comm = comm[len('doc:'):].strip()
elif comm.startswith('doc-method:'):
addLine = self.addMethDocString
comm = comm[len('doc-method:'):].strip()
elif comm.startswith('doc-module:'):
addLine = self.addModuleDocString
comm = comm[len('doc-module:'):].strip()
elif comm.startswith('doc-class:'):
addLine = self.addClassDocString
comm = comm[len('doc-class:'):].strip()
elif comm.startswith('header:'):
addLine = self.addModuleHeader
comm = comm[len('header:'):].strip()
else:
addLine = self.addMethComment
for line in comm.splitlines():
addLine(line)
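# Illustrative summary of the prefix dispatch above (the comment text is
# hypothetical; prefixes are matched on the comment body):
#   doc-module: renders the site header   -> module docstring line
#   doc-class: main page template         -> class docstring line
#   doc: ... / doc-method: ...            -> current method's docstring
#   header: generated by the build step   -> module header comment
# Bar comments (only '#' characters) are dropped, a specialVarRE match routes to
# addSpecialVar, and anything else becomes an ordinary method comment.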
# methods for module code wrapping
def getModuleCode(self):
if not self._compiled:
self.compile()
if self._moduleDef:
return self._moduleDef
else:
return self.wrapModuleDef()
def __to_bytes(self):
code = self.getModuleCode()
if isinstance(code, bytes):
return code
return code.encode(self.getModuleEncoding())
def __to_unicode(self):
code = self.getModuleCode()
if isinstance(code, bytes):
return code.decode(self.getModuleEncoding())
return code
if PY2:
__str__ = __to_bytes
__unicode__ = __to_unicode
else:
__bytes__ = __to_bytes
__str__ = __to_unicode
def wrapModuleDef(self):
self.addSpecialVar('CHEETAH_docstring', self.setting('defDocStrMsg'))
self.addModuleGlobal('__CHEETAH_version__ = %r' % Version)
self.addModuleGlobal('__CHEETAH_versionTuple__ = %r' % (VersionTuple,))
if self.setting('addTimestampsToCompilerOutput'):
self.addModuleGlobal('__CHEETAH_genTime__ = %r' % time.time())
self.addModuleGlobal(
'__CHEETAH_genTimestamp__ = %r' % self.timestamp())
if self._filePath:
timestamp = self.timestamp(self._fileMtime)
self.addModuleGlobal('__CHEETAH_src__ = %r' % self._filePath)
self.addModuleGlobal(
'__CHEETAH_srcLastModified__ = %r' % timestamp)
else:
self.addModuleGlobal('__CHEETAH_src__ = None')
self.addModuleGlobal('__CHEETAH_srcLastModified__ = None')
moduleDef = """%(header)s
%(docstring)s
##################################################
## DEPENDENCIES
%(imports)s
##################################################
## MODULE CONSTANTS
%(constants)s
%(specialVars)s
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
raise AssertionError(
'This template was compiled with Cheetah version'
' %%s. Templates compiled before version %%s must be recompiled.'%%(
__CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
%(classes)s
## END CLASS DEFINITION
if not hasattr(%(mainClassName)s, '_initCheetahAttributes'):
templateAPIClass = getattr(%(mainClassName)s,
'_CHEETAH_templateClass',
Template)
templateAPIClass._addCheetahPlumbingCodeToClass(%(mainClassName)s)
%(footer)s
""" % {
'header': self.moduleHeader(),
'docstring': self.moduleDocstring(),
'specialVars': self.specialVars(),
'imports': self.importStatements(),
'constants': self.moduleConstants(),
'classes': self.classDefs(),
'footer': self.moduleFooter(),
'mainClassName': self._mainClassName,
} # noqa
self._moduleDef = moduleDef
return moduleDef
def timestamp(self, theTime=None):
if not theTime:
theTime = time.time()
return time.asctime(time.localtime(theTime))
def moduleHeader(self):
header = self._moduleShBang + '\n'
header += self._moduleEncodingStr + '\n'
if self._moduleHeaderLines:
offSet = self.setting('commentOffset')
header += (
'#' + ' '*offSet # noqa: E226,E501 missing whitespace around operator
+ ('\n#' + ' '*offSet).join(self._moduleHeaderLines) # noqa: E226,E501 missing whitespace around operator
+ '\n')
return header
def moduleDocstring(self):
if not self._moduleDocStringLines:
return ''
return ('"""' + '\n'.join(self._moduleDocStringLines)
+ '\n"""\n')
def specialVars(self):
chunks = []
theVars = self._specialVars
keys = sorted(theVars.keys())
for key in keys:
chunks.append(key + ' = ' + repr(theVars[key]))
return '\n'.join(chunks)
def importStatements(self):
return '\n'.join(self._importStatements)
def moduleConstants(self):
return '\n'.join(self._moduleConstants)
def classDefs(self):
classDefs = [klass.classDef() for klass in self._finishedClasses()]
return '\n\n'.join(classDefs)
def moduleFooter(self):
return """
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://cheetahtemplate.org/
##################################################
## if run from command line:
if __name__ == '__main__':
from Cheetah.TemplateCmdLineIface import CmdLineIface
CmdLineIface(templateObj=%(className)s()).run()
""" % {'className': self._mainClassName}
##################################################
# Make Compiler an alias for ModuleCompiler
Compiler = ModuleCompiler
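# Illustrative usage sketch (hypothetical template source and module name):
#   from Cheetah.Compiler import Compiler
#   py_src = str(Compiler(source='Hello $name!', moduleName='Greeting'))
#   # py_src now holds the generated Python module defining class Greeting,
#   # ready to be written to Greeting.py or exec'd.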
|
[
"329052613@qq.com"
] |
329052613@qq.com
|
b000eea6b4fe6367d08925b8fa050bc5b3370610
|
89ab6ddbd077a9bc4e5b7a79d458baaf7516fa4f
|
/kwstandbyclient/v1/shell_commands/nodes.py
|
b8227a0f094d3b17faa87ae16d42e2e3251a149d
|
[
"Apache-2.0"
] |
permissive
|
frossigneux/python-kwstandbyclient
|
2ec1b7c326728260860cdc6682a91c990d552aff
|
7d9d4235ec97f6761344e3d880b5acbb60e592a9
|
refs/heads/master
| 2021-01-19T16:51:33.707765
| 2014-03-17T16:34:36
| 2014-07-12T13:11:15
| 20,718,682
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,816
|
py
|
# Copyright (c) 2014 Bull.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from kwstandbyclient import command
class ListNodes(command.ListCommand):
"""Print a list of node status."""
resource = 'node'
log = logging.getLogger(__name__ + '.ListNodes')
list_columns = ['node', 'status']
def get_parser(self, prog_name):
parser = super(ListNodes, self).get_parser(prog_name)
parser.add_argument(
'--sort-by', metavar="<node_column>",
help='column name used to sort result',
default='node'
)
return parser
class ShowNode(command.ShowCommand):
"""Show node status."""
resource = 'node'
json_indent = 4
allow_names = False
log = logging.getLogger(__name__ + '.ShowNode')
class UpdateNode(command.UpdateCommand):
"""Update node status."""
resource = 'node'
allow_names = False
log = logging.getLogger(__name__ + '.UpdateNode')
def get_parser(self, prog_name):
parser = super(UpdateNode, self).get_parser(prog_name)
parser.add_argument(
'--status', metavar='<STATUS>',
help='New status for the node'
)
return parser
def args2body(self, parsed_args):
# use the value supplied via --status when given; fall back to 'standby'
params = {'status': parsed_args.status or 'standby'}
return params
|
[
"francois.rossigneux@inria.fr"
] |
francois.rossigneux@inria.fr
|
198a2b094565fec7c23f34cb39554603de366025
|
9f1ebef0f493ba2d7b72861a72a68bf17cda5c85
|
/packages/augur-core/tests/contract.py
|
9cfe4f4e17b7b2838e924831e7cd37b861999b04
|
[
"GPL-3.0-or-later",
"GPL-1.0-or-later",
"LGPL-2.1-or-later",
"LGPL-2.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"GPL-3.0-only",
"MIT"
] |
permissive
|
MicrohexHQ/augur
|
c2bd62cb04a825f3661f7b10fb7b4efeab5065a4
|
dad3ce5678e14482bb3b3cc925d398b6d659b2f5
|
refs/heads/master
| 2020-07-27T00:11:12.315221
| 2019-09-16T13:03:15
| 2019-09-16T13:03:15
| 208,801,012
| 0
| 0
|
MIT
| 2019-09-16T13:02:11
| 2019-09-16T13:02:11
| null |
UTF-8
|
Python
| false
| false
| 2,825
|
py
|
from eth_tester.exceptions import TransactionFailed
from decimal import Decimal
class Contract():
def __init__(self, w3, w3Contract, logListener=None, coverageMode=False):
self.w3 = w3
self.w3Contract = w3Contract
self.address = self.w3Contract.address
self.abi = self.w3Contract.abi
self.logListener = logListener
self.coverageMode = coverageMode
if len(self.w3Contract.abi) < 1:
return
for abiFunc in self.w3Contract.functions._functions:
functionName = abiFunc['name']
originalFunction = self.w3Contract.functions.__dict__[functionName]
setattr(self, functionName, self.get_contract_function(originalFunction, abiFunc))
def get_contract_function(self, originalFunction, abiFunc):
def contract_function(*args, sender=self.w3.eth.accounts[0], value=0, getReturnData=True, commitTx=True, debug=False):
contractFunction = originalFunction(*self.processArgs(*args, abiFunc=abiFunc))
retVal = True
outputs = abiFunc['outputs']
# In coverage mode all functions change state through logs so we can't do this optimization
if not self.coverageMode and len(outputs) == 1 and outputs[0]['type'] == 'bool':
getReturnData = False
if getReturnData or abiFunc['constant'] or not commitTx:
retVal = contractFunction.call({'from': sender, 'value': value}, block_identifier='pending')
if not abiFunc['constant'] and commitTx:
tx_hash = contractFunction.transact({'from': sender, 'value': value, 'gasPrice': 1, 'gas': 750000000})
receipt = self.w3.eth.waitForTransactionReceipt(tx_hash, 1)
if receipt.status == 0:
raise TransactionFailed
if self.logListener:
for log in receipt.logs:
self.logListener(log)
return retVal
return contract_function
def processArgs(self, *args, abiFunc):
processedArgs = []
for index, abiParam in enumerate(abiFunc['inputs']):
arg = args[index]
argType = type(arg)
if argType is float or argType is Decimal:
arg = int(arg)
elif abiParam['type'] == 'uint256[]':
arg = [int(item) for item in arg]
elif argType == str and abiParam['type'] == 'bytes32':
arg = arg.encode('utf-8')
elif abiParam['type'] == 'bytes32[]' and type(arg[0]) is not bytes:
arg = [item.encode('utf-8') for item in arg]
processedArgs.append(arg)
return processedArgs
def getLogs(self, eventName):
return self.w3Contract.events.__dict__[eventName].getLogs()
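# Illustrative usage sketch (contract and function names are hypothetical and
# depend on the wrapped ABI):
#   token = Contract(w3, w3_token_contract)
#   balance = token.balanceOf(account)             # constant -> plain .call()
#   token.transfer(other, 10**18, sender=account)  # transacts; raises
#                                                  # TransactionFailed on revert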
|
[
"achap5dk@gmail.com"
] |
achap5dk@gmail.com
|
65777f375fdc631125c33c071fd30d9ea4168f09
|
1b0545444cdf3bb0cb38279d1de1dc7448a6dae5
|
/src/petsProject/PUPG/urls.py
|
7e30d8bb47b5d40215606b9a858e33fe7ebc231f
|
[] |
no_license
|
JacobGros/pets
|
5795f8a3f68a4323b4cda37ac51fb09cc934a55b
|
495e8aa4b452fa37f1159701642b95473fa1a4df
|
refs/heads/master
| 2020-03-29T12:18:52.090127
| 2018-12-11T21:48:16
| 2018-12-11T21:48:16
| 149,893,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 893
|
py
|
from django.urls import path
from PUPG import views
urlpatterns = [
path("", views.index, name="index"),
path("submit/", views.PetCreateView.as_view(), name="submit"),
path("leaderboard/", views.leaderboard, name="leaderboard"),
path("vote/", views.vote, name="vote"),
path('person/<int:pk>', views.PersonDetailView.as_view(), name='person-detail'),
path('pet/<int:pk>', views.PetDetailView.as_view(), name='pet-detail'),
path('vote/vote/<int:id>/', views.vote_for_pet, name='vote_for_pet'),
path('search/', views.search_view, name='search'),
path('profile/', views.my_profile, name='profile_detail'),
path('leaderboardSpecies/', views.leaderboardSpecies, name='leaderboardSpecies'),
path('help/', views.help, name = 'help')
]
#if settings.DEBUG:
#urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
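# Illustrative example (the final URL prefix depends on how the project URLconf
# includes this module):
#   from django.urls import reverse
#   reverse('pet-detail', args=[1])   # -> something like '/pet/1', served by PetDetailView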
|
[
"vagrant@vagrant.vm"
] |
vagrant@vagrant.vm
|
6f7752e41013abe69aceec822e6aa473025712a8
|
e82b761f53d6a3ae023ee65a219eea38e66946a0
|
/All_In_One/addons/gr/m_biped_leg.py
|
febedbbdec798af635d5931aa4fa4ac89c849e53
|
[] |
no_license
|
2434325680/Learnbgame
|
f3a050c28df588cbb3b14e1067a58221252e2e40
|
7b796d30dfd22b7706a93e4419ed913d18d29a44
|
refs/heads/master
| 2023-08-22T23:59:55.711050
| 2021-10-17T07:26:07
| 2021-10-17T07:26:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,713
|
py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
##########################################################################################################
##########################################################################################################
import bpy
from math import radians
from mathutils import Vector
from .constants import Constants
from .utils import create_module_prop_bone
from .utils import bone_settings
from .utils import duplicate_bone
from .utils import get_distance
from .utils import set_parent_chain
from .utils import prop_to_drive_constraint
from .utils import mirror_bone_to_point
from .utils import bone_visibility
from .utils import set_module_on_relevant_bones
from .utils import three_bone_limb
from .utils import isolate_rotation
from .utils import get_parent_name
from .utils import set_bone_only_layer
from .utils import get_ik_group_name
from .utils import prop_to_drive_pbone_attribute_with_array_index
from .utils import create_twist_bones
from .utils import snappable_module
def biped_leg(bvh_tree, shape_collection, module, chain, pole_target_name, shin_bend_back_limit, ik_foot_parent_name, pole_target_parent_name, side, thigh_twist_count, shin_twist_count):
# chain length should be exactly 4
rig = bpy.context.object
ik_group_name = get_ik_group_name(side)
thigh_name = chain[0]
shin_name = chain[1]
foot_name = chain[2]
toes_name = chain[3]
first_parent_name = get_parent_name(thigh_name)
fk_prefix = Constants.fk_prefix
ik_prefix = Constants.ik_prefix
ik_group_name = get_ik_group_name(side=side)
toes_bend_axis = '-X'
shin_bend_axis = '-X'
# bones that should be used for animation
relevant_bone_names = []
# bone that holds all properties of the module
prop_bone_name = create_module_prop_bone(module=module)
# set parent
first_parent_name = get_parent_name(thigh_name)
# LOW-LEVEL BONES
# set parents
for index, name in enumerate(chain):
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
if index == 0:
ebones[name].parent = ebones[first_parent_name]
else:
ebones[name].parent = ebones[chain[index - 1]]
relevant_bone_names.append(name)
bone_settings(bone_name=name,
layer_index=Constants.base_layer,
group_name=Constants.base_group,
use_deform=True,
lock_loc=True,
lock_scale=True,
bone_type=Constants.base_type
)
# _____________________________________________________________________________________________________
# three bone limb set-up
three_bone_limb(bvh_tree=bvh_tree,
shape_collection=shape_collection,
module=module,
b1=thigh_name,
b2=shin_name,
b3=foot_name,
pole_target_name=pole_target_name,
parent_pole_target_to_ik_target=True,
b2_bend_axis='-X',
b2_bend_back_limit=shin_bend_back_limit,
first_parent_name=first_parent_name,
ik_b3_parent_name=ik_foot_parent_name,
pole_target_parent_name=pole_target_parent_name,
b3_shape_up=True,
side=side
)
isolate_rotation(module=module,
parent_bone_name=fk_prefix + first_parent_name,
first_bone_name=fk_prefix + thigh_name
)
# TOES BONE:
# FK
name = fk_prefix + toes_name
duplicate_bone(source_name=toes_name,
new_name=name,
parent_name=fk_prefix + foot_name
)
bone_settings(bvh_tree=bvh_tree,
shape_collection=shape_collection,
bone_name=name,
layer_index=Constants.fk_layer,
group_name=Constants.fk_group,
lock_loc=True,
lock_scale=True,
bone_shape_name='sphere',
bone_shape_pos='MIDDLE',
bone_shape_up=True,
bone_shape_up_only_for_transform=True,
bone_type=Constants.fk_type
)
relevant_bone_names.append(name)
# bind low-level bones to FK constraints
bpy.ops.object.mode_set(mode='POSE')
pbone = rig.pose.bones[toes_name]
c = pbone.constraints.new('COPY_ROTATION')
c.name = 'bind_to_fk_1'
c.target = rig
c.subtarget = fk_prefix + toes_name
c.mute = True
# lock toe axes
if toes_bend_axis == 'X' or toes_bend_axis == '-X':
lock_1 = 1
lock_2 = 2
for ai in [lock_1, lock_2]:
prop_to_drive_pbone_attribute_with_array_index(prop_bone_name=name,
bone_name=name,
prop_name='limit_fk_toes' + side,
attribute='lock_rotation',
array_index=ai,
prop_min=0,
prop_max=1,
prop_default=0,
description='limit toes to single axis rotation',
expression='v1'
)
# filler bones (needed for GYAZ retargeter)
bpy.ops.object.mode_set(mode='EDIT')
filler_name = 'fk_filler_' + thigh_name
ebone = rig.data.edit_bones.new(name=filler_name)
ebones = rig.data.edit_bones
ebone.head = ebones[first_parent_name].head
ebone.tail = ebones[thigh_name].head
ebone.roll = 0
ebone.parent = ebones[first_parent_name]
set_bone_only_layer(bone_name=filler_name,
layer_index=Constants.fk_extra_layer
)
# IK
name = ik_prefix + toes_name
duplicate_bone(source_name=toes_name,
new_name=name,
parent_name=ik_prefix + foot_name
)
bone_settings(bvh_tree=bvh_tree,
shape_collection=shape_collection,
bone_name=name,
layer_index=Constants.ctrl_ik_layer,
group_name=ik_group_name,
lock_loc=True,
lock_scale=True,
bone_shape_name='cube',
bone_shape_pos='MIDDLE',
bone_shape_up=True,
bone_shape_up_only_for_transform=True,
bone_type=Constants.ik_type
)
relevant_bone_names.append(name)
# lock toe axes
if toes_bend_axis == 'X' or toes_bend_axis == '-X':
lock_1 = 1
lock_2 = 2
for ai in [lock_1, lock_2]:
prop_to_drive_pbone_attribute_with_array_index(prop_bone_name=name,
bone_name=name,
prop_name='limit_ik_toes' + side,
attribute='lock_rotation',
array_index=ai,
prop_min=0,
prop_max=1,
prop_default=1,
description='limit toes to single axis rotation',
expression='v1'
)
# bind low-level bones to IK constraints
bpy.ops.object.mode_set(mode='POSE')
pbone = rig.pose.bones[toes_name]
c = pbone.constraints.new('COPY_ROTATION')
c.name = 'bind_to_ik_1'
c.target = rig
c.subtarget = ik_prefix + toes_name
c.mute = True
# BIND TO (0: FK, 1: IK, 2:BIND)
prop_to_drive_constraint(prop_bone_name=prop_bone_name,
bone_name=toes_name,
constraint_name='bind_to_fk_1',
prop_name='switch_' + module,
attribute='mute',
prop_min=0,
prop_max=2,
prop_default=0,
description='0:fk, 1:ik, 2:base',
expression='1 - (v1 < 1)'
)
prop_to_drive_constraint(prop_bone_name=prop_bone_name,
bone_name=toes_name,
constraint_name='bind_to_ik_1',
prop_name='switch_' + module,
attribute='mute',
prop_min=0,
prop_max=2,
prop_default=0,
description='0:fk, 1:ik, 2:base',
expression='1 - (v1 > 0 and v1 < 2)'
)
# SNAP INFO
bpy.ops.object.mode_set(mode='POSE')
pbone = rig.pose.bones[prop_bone_name]
pbone['snapinfo_singlebone_0'] = [fk_prefix + toes_name, ik_prefix + toes_name]
# FOOT ROLL:
# get heel position
bpy.ops.object.mode_set(mode='EDIT')
# set ray start and direction
ray_start = rig.data.edit_bones[toes_name].head
ray_direction = (0, 1, 0)
ray_distance = 1
# cast ray
hit_loc, hit_nor, hit_index, hit_dist = bvh_tree.ray_cast(ray_start,
ray_direction,
ray_distance
)
# point one third of the way from hit_loc (the heel) toward toes.head
difference = ray_start - hit_loc
difference /= 3
third_point = hit_loc + difference
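# Worked example of the arithmetic above (hypothetical coordinates): with the
# heel hit_loc at y=0.10 and the toes head (ray_start) at y=-0.05, difference is
# -0.15 on Y, divided by 3 gives -0.05, so third_point lands one third of the
# way from the heel toward the toes head (y=0.05); it is reused below as the
# tail of the foot-roll helper bones.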
# ik foot main
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
ik_foot_main_name = ik_prefix + 'main_' + foot_name
ebone = ebones.new(name=ik_foot_main_name)
ik_foot_name = ik_prefix + foot_name
ik_foot_ebone = ebones[ik_foot_name]
foot_length = get_distance(ik_foot_ebone.head, ik_foot_ebone.tail)
ebone.head = ik_foot_ebone.head
ebone.tail = (ik_foot_ebone.head[0], ik_foot_ebone.head[1] - foot_length, ik_foot_ebone.head[2])
ebone.roll = radians(-180) if side == '_l' else radians(180)
ebone.parent = ebones[ik_foot_parent_name]
bone_settings(bvh_tree=bvh_tree,
shape_collection=shape_collection,
bone_name=ik_foot_main_name,
layer_index=Constants.ctrl_ik_layer,
group_name=ik_group_name,
lock_scale=True,
bone_shape_name='cube',
bone_shape_pos='HEAD',
bone_shape_up=True,
bone_type=Constants.ik_type
)
relevant_bone_names.append(ik_foot_main_name)
# ik foot snap target
snap_target_foot_name = 'snap_target_' + foot_name
duplicate_bone(source_name=ik_foot_main_name,
new_name=snap_target_foot_name,
parent_name=fk_prefix + foot_name,
)
bone_settings(bone_name=snap_target_foot_name,
layer_index=Constants.fk_extra_layer,
lock_loc=True,
lock_rot=True,
lock_scale=True
)
# foot roll back
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
foot_roll_back_name = 'roll_back_' + foot_name
ebone = ebones.new(name=foot_roll_back_name)
ebone.head = hit_loc
ebone.tail = third_point
ebone.roll = ebones[foot_name].roll
ebone.parent = ebones[ik_foot_main_name]
bone_settings(bone_name=foot_roll_back_name,
layer_index=Constants.ctrl_ik_extra_layer,
group_name=ik_group_name,
lock_loc=True,
lock_scale=True
)
# foot roll front
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
foot_roll_front_name = 'roll_front_' + foot_name
ebone = ebones.new(name=foot_roll_front_name)
ebone.head = ebones[toes_name].head
ebone.tail = third_point
ebone.roll = ebones[foot_name].roll
ebone.parent = ebones[foot_roll_back_name]
ebones[ik_prefix + foot_name].parent = ebones[foot_roll_front_name]
bone_settings(bone_name=foot_roll_front_name,
layer_index=Constants.ctrl_ik_extra_layer,
group_name=ik_group_name,
lock_loc=True,
lock_scale=True
)
# foot roll main
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
foot_roll_main_name = 'roll_main_' + foot_name
ebone = ebones.new(name=foot_roll_main_name)
ebone.head = ebones[foot_name].head
length = get_distance(ebones[foot_name].head, ebones[foot_name].tail)
ebone.tail = ebone.head + Vector((0, length, 0))
ebone.roll = ebones[foot_name].roll
ebone.parent = ebones[ik_foot_main_name]
bone_settings(bvh_tree=bvh_tree,
shape_collection=shape_collection,
bone_name=foot_roll_main_name,
layer_index=Constants.ctrl_ik_layer,
group_name=ik_group_name,
lock_loc=True,
lock_scale=True,
bone_shape_name='foot_roll',
bone_shape_pos='TAIL',
bone_shape_manual_scale=Constants.target_shape_size,
bone_type=Constants.ik_type
)
relevant_bone_names.append(foot_roll_main_name)
# parent pole target to foot_roll_main_name
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
ebones['target_' + pole_target_name].parent = ebones[ik_foot_main_name]
# ik_toes parent
ik_toes_parent_name = ik_prefix + 'parent_' + toes_name
duplicate_bone(source_name=ik_prefix + toes_name,
new_name=ik_toes_parent_name,
parent_name=ik_prefix + foot_name
)
bone_settings(bone_name=ik_toes_parent_name,
layer_index=Constants.ctrl_ik_extra_layer,
lock_loc=True,
lock_scale=True
)
bpy.ops.object.mode_set(mode='EDIT')
ebones = rig.data.edit_bones
ebones[ik_prefix + toes_name].parent = ebones[ik_toes_parent_name]
# relegate old ik_foot bone
set_bone_only_layer(bone_name=ik_prefix + foot_name,
layer_index=Constants.ctrl_ik_extra_layer
)
# update snap_info
bpy.ops.object.mode_set(mode='POSE')
pbones = rig.pose.bones
old_snap_info = pbones['module_props__' + module]["snapinfo_3bonelimb_0"]
old_snap_info[9], old_snap_info[10], old_snap_info[11] = snap_target_foot_name, ik_foot_main_name, foot_roll_main_name
pbones['module_props__' + module]["snapinfo_3bonelimb_0"] = old_snap_info
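    # slots 9-11 of the limb snap info now reference the foot-roll controls
    # (snap target, IK main, roll main) so FK/IK snapping picks them up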
bpy.ops.object.mode_set(mode='POSE')
pbones = rig.pose.bones
# foot roll constraints:
# foot roll front
if toes_bend_axis == '-X':
use_x = True
use_z = False
pbone = pbones[foot_roll_front_name]
c = pbone.constraints.new('COPY_ROTATION')
c.name = 'copy foot_roll_main'
c.target = rig
c.subtarget = foot_roll_main_name
c.use_x = use_x
c.use_y = False
c.use_z = use_z
c.invert_x = False
c.invert_y = False
c.invert_z = False
c.use_offset = False
c.target_space = 'LOCAL'
c.owner_space = 'LOCAL'
c.influence = 1
if toes_bend_axis == '-X':
min_x = 0
max_x = radians(180)
min_z = 0
max_z = 0
c = pbone.constraints.new('LIMIT_ROTATION')
c.name = 'limit rotation'
c.owner_space = 'LOCAL'
c.use_transform_limit = True
c.influence = 1
c.use_limit_x = True
c.use_limit_y = True
c.use_limit_z = True
c.min_x = min_x
c.max_x = max_x
c.min_y = 0
    c.max_y = 0
c.min_z = min_z
c.max_z = max_z
if toes_bend_axis == '-X':
use_x = True
use_z = False
# foot roll back
pbone = pbones[foot_roll_back_name]
c = pbone.constraints.new('COPY_ROTATION')
c.name = 'copy foot_roll_main_name'
c.target = rig
c.subtarget = foot_roll_main_name
c.use_x = use_x
c.use_y = False
c.use_z = use_z
c.invert_x = use_x
c.invert_y = False
c.invert_z = use_z
c.use_offset = False
c.target_space = 'LOCAL'
c.owner_space = 'LOCAL'
c.influence = 1
if toes_bend_axis == '-X':
min_x = 0
max_x = radians(180)
min_z = 0
max_z = 0
c = pbone.constraints.new('LIMIT_ROTATION')
c.name = 'limit rotation'
c.owner_space = 'LOCAL'
c.use_transform_limit = True
c.influence = 1
c.use_limit_x = True
c.use_limit_y = True
c.use_limit_z = True
c.min_x = min_x
c.max_x = max_x
c.min_y = 0
    c.max_y = 0
c.min_z = min_z
c.max_z = max_z
# foot roll main
if toes_bend_axis == '-X':
min_x = radians(-180)
max_x = radians(180)
min_z = 0
max_z = 0
pbone = pbones[foot_roll_main_name]
c = pbone.constraints.new('LIMIT_ROTATION')
c.name = 'limit rotation'
c.owner_space = 'LOCAL'
c.use_transform_limit = True
c.influence = 1
c.use_limit_x = True
c.use_limit_y = True
c.use_limit_z = True
c.min_x = min_x
c.max_x = max_x
c.min_y = 0
    c.max_y = 0
c.min_z = min_z
c.max_z = max_z
# ik_toes_parent
if toes_bend_axis == '-X':
use_x = True
use_z = False
pbone = pbones[ik_toes_parent_name]
c = pbone.constraints.new('COPY_ROTATION')
c.name = 'copy foot_roll_front'
c.target = rig
c.subtarget = foot_roll_front_name
c.use_x = use_x
c.use_y = False
c.use_z = use_z
c.invert_x = True
c.invert_y = False
c.invert_z = True
c.use_offset = True
c.target_space = 'LOCAL'
c.owner_space = 'LOCAL'
c.influence = 1
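    # net effect: roll_main drives roll_back (inverted) and roll_front (direct), each
    # clamped to 0-180 degrees, while this inverted copy counter-rotates the toes so
    # they stay planted as the foot rolls over the ball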
bone_visibility(prop_bone_name=prop_bone_name,
module=module,
relevant_bone_names=relevant_bone_names,
ik_ctrl='ik'
)
# set module name on relevant bones (used by the 'N-panel' interface)
set_module_on_relevant_bones(relevant_bone_names=relevant_bone_names,
module=module
)
# make the 'Snap&Key' operator recognize this module
snappable_module(module=module)
# TWIST BONES
create_twist_bones(bvh_tree=bvh_tree,
shape_collection=shape_collection,
source_bone_name=thigh_name,
count=thigh_twist_count,
upper_or_lower_limb='UPPER',
twist_target_distance=Constants.twist_target_distance,
end_affector_name='',
influences=Constants.forearm_twist_influences,
is_thigh=True
)
create_twist_bones(bvh_tree=bvh_tree,
shape_collection=shape_collection,
source_bone_name=shin_name,
count=shin_twist_count,
upper_or_lower_limb='LOWER',
twist_target_distance=Constants.twist_target_distance,
end_affector_name=foot_name,
influences=Constants.shin_twist_influences,
is_thigh=False
)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
5ddad824f9a1771ecc0c22e87edfbcb5638cd2de
|
b00873d36e44128ce30623da0ee3b556e4e3d7e7
|
/solutions/solution34.py
|
9127e90dde3024fbdc1b4a09dbba1eba976718bf
|
[
"MIT"
] |
permissive
|
Satily/leetcode_python_solution
|
b4aadfd1998877b5086b5423c670750bb422b2c8
|
3f05fff7758d650469862bc28df9e4aa7b1d3203
|
refs/heads/master
| 2021-07-18T07:53:10.387182
| 2021-07-17T06:30:09
| 2021-07-17T06:30:09
| 155,074,789
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 991
|
py
|
class Solution:
def searchRange(self, nums: 'List[int]', target: int) -> 'List[int]':
if len(nums) == 0:
return [-1, -1]
left, right = 0, len(nums) - 1
while right > left:
mid = (left + right) >> 1
if nums[mid] < target:
left = mid + 1
else:
right = mid
lower_bound = left
left, right = 0, len(nums) - 1
while right > left:
mid = ((left + right) >> 1) + ((left + right) & 1)
if nums[mid] > target:
right = mid - 1
else:
left = mid
upper_bound = left
return [lower_bound, upper_bound] if lower_bound <= upper_bound and nums[lower_bound] == target else [-1, -1]
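# the first loop finds the leftmost index with nums[mid] >= target (floor midpoint);
# the second finds the rightmost index with nums[mid] <= target, using a ceiling
# midpoint so the search still converges when only the left bound moves up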
if __name__ == '__main__':
print(Solution().searchRange([5, 7, 7, 8, 8, 10], 8))
print(Solution().searchRange([5, 7, 7, 8, 8, 10], 6))
print(Solution().searchRange([], 0))
|
[
"ladfmcl@126.com"
] |
ladfmcl@126.com
|
24f98750f19eae082a24931ba18df9130c6d5f8c
|
7fed9926b605213f207ef24804c419078defbde0
|
/mission/constants/config.py
|
4809adee6e07b4dae66f11846558712c16dfbd41
|
[
"BSD-3-Clause"
] |
permissive
|
zhaohongqiang/software
|
12ad1330ea3a2c57526d1000f289560a2083a1d0
|
7ac106ef84c33deb99267246125a6be17ef9d164
|
refs/heads/master
| 2020-05-29T12:30:00.527923
| 2016-07-19T06:06:17
| 2016-07-19T06:06:17
| 63,673,097
| 1
| 0
| null | 2016-07-19T07:52:12
| 2016-07-19T07:52:11
| null |
UTF-8
|
Python
| false
| false
| 9
|
py
|
teagle.py
|
[
"software@cuauv.org"
] |
software@cuauv.org
|
cf94c18f53619b6515f45a13125ddacd43f35fb4
|
97e0d474658f4fae5d68def8c23719a7e98eca32
|
/ghost/client.py
|
19928c5a07b0b7267afc30f4b37a7f28d29fbc16
|
[] |
no_license
|
leonardormlins/fateclando
|
3e8ba807599e1a132d2cd40c5eacafe2310cb2b1
|
08f1b0d4c46606c0190f2b0c49bc282f6ad73f29
|
refs/heads/master
| 2022-11-26T13:15:39.001807
| 2020-07-22T13:54:31
| 2020-07-22T13:54:31
| 280,486,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
import socket
import rsa
def encrypt_message(text, pub_key):
return rsa.encrypt(
text,
rsa.PublicKey.load_pkcs1(pub_key, format='PEM')
)
def capturePublicKey():
filePath = "/home/leo/Documents/Fatec/5Semestre/Branquinho/fateclando/ghost/ghostPub.txt"
file = open(filePath,'r')
pub_key = ''
for line in file:
pub_key += line
file.close()
return pub_key
def startClient():
SERVER = '127.0.0.1'
PORT = 5020
pub_key = capturePublicKey()
tcp = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
dest = (SERVER, PORT)
tcp.connect(dest)
tcp.send(pub_key.encode('ascii'))
    print ('To get out just type x (or CTRL+C)\n')
msg = input()
while msg != 'x':
tcp.send(
encrypt_message(msg.encode('ascii'), pub_key)
)
msg = input()
tcp.close()
|
[
"leonardolins.ext@hotmail.com"
] |
leonardolins.ext@hotmail.com
|
dbbfedb8e5a864c4327ba65f8e0c6a5752f63d8e
|
d0a9175e68911f7d79ad106027c12ac0981674f9
|
/CodeAcademySolutions/Custom_Print.py
|
3a2ee5d187325f3254f213d318668bb0cc9600ce
|
[] |
no_license
|
mwongeraE/PythonProjects
|
595199e177e6782605fd50da9c91e9fd29464cf9
|
0326841b063a64a4240bc3318c2d688eedbd158d
|
refs/heads/master
| 2021-09-23T12:11:12.481485
| 2018-09-22T13:48:00
| 2018-09-22T13:48:00
| 106,571,226
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 141
|
py
|
board = []
for x in range(0,5):
loop = ["O"] * 5
board.append(loop)
def print_board(board_in):
    for row in board_in:
        print row
print_board(board)
|
[
"evansmwongera@gmail.com"
] |
evansmwongera@gmail.com
|
5110b63f13df8031bb400f7ac712f28be4fdf17f
|
ff47a30ca8475a657adef3c2ca1d087f2c3ed1c9
|
/Ch4.py
|
24dc3801f5a0335aebf2aadc774b9298ad84ec50
|
[] |
no_license
|
bliotti/crypto-challenge
|
0dc90ddad008a9c58a08e9e85484970e07ac2272
|
08a9ec93acbf94684bc8b06e4218718e73cb71ec
|
refs/heads/master
| 2020-08-01T14:48:36.599709
| 2019-12-14T09:00:41
| 2019-12-14T09:00:41
| 211,026,232
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
# https://cryptopals.com/sets/1/challenges/4
# Detect single-character XOR
# One of the 60-character strings in this file has been encrypted by single-character XOR.
# Find it.
# (Your code from #3 should help.)
from Ch3 import decodeByLetter, areWords
f = open("Ch4.txt", "r")
max = 0
for x in f:
c = areWords(decodeByLetter(x))
if c >= max:
max = c
winner = decodeByLetter(x)
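        # note: '>=' keeps the last candidate on a tie, and 'max' shadows the builtin here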
print(winner.decode("utf-8"))
|
[
"bliotti@protonmail.com"
] |
bliotti@protonmail.com
|
31e86eeceb39f0063b0cc4250e5d9c3ea0a0e852
|
370c67c6aa1632b9df30df3f85351364e830efcb
|
/client_menu.py
|
6552b9ec1dd4a72159aee21f6572d4efb05a7a6e
|
[] |
no_license
|
AvivBenShoham/screen-party
|
c02b33019469459108773009758271d0bbf80f44
|
d4a0560ab565dcb730987e7a9207182cd0137cb8
|
refs/heads/master
| 2022-01-14T17:31:22.750213
| 2019-06-13T20:14:33
| 2019-06-13T20:14:33
| 191,825,561
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,886
|
py
|
from tkinter import *
from tkinter import messagebox
from client import client
client_online = False
myclient = None
"""tries to connect to host"""
def try_to_connect(): #no need to pass arguments to functions in both cases
global client_online
global myclient
global root
if myclient == None:
myclient = client(root)
try:
x = int(port.get())
except:
messagebox.showinfo("ip and/or port are wrong","ip {0} and port {1} are wrong, please try again".format(hostIP.get(),port.get()))
return
if not myclient.set_client_and_connect(hostIP.get(), int(port.get())): #tries to connect to host by IP and Port
messagebox.showinfo("ip and/or port are wrong","ip {0} and port {1} are wrong, please try again".format(hostIP.get(),port.get()))
return
else:
client_online = True
if not myclient.connect_to_host(password.get()): #sends password to the host
messagebox.showinfo("password is wrong","password is wrong, please try again")
return
messagebox.showinfo("connection established","you have connected successfully")
myclient.start() #gets here if the connection established and the password is right
#makes the connection frame
root = Tk()
hostIP = StringVar()
port = StringVar()
password = StringVar()
label1 = Label(root,text="enter the host IP:").grid(row=0,column=0)
label2 = Label(root,text="enter the port:").grid(row=1,column=0)
label = Label(root,text="enter the password:").grid(row=2,column=0)
ent = Entry(root,width=50,textvariable = hostIP).grid(row=0,column=1)
ent1 = Entry(root,width=50,textvariable = port).grid(row=1,column=1)
ent2 = Entry(root,width=50,textvariable = password).grid(row=2,column=1)
btn2 = Button(root, text="Connect", command=try_to_connect)
btn2.grid(row=3)
root.mainloop()
|
[
"noreply@github.com"
] |
AvivBenShoham.noreply@github.com
|
3348482129614559bf562009587fd0cd8f650a76
|
bf885fc079ee6b70baaddd22fd921637aa2e8e13
|
/arrays/kth_largests.py
|
7cea40fcc298e73c4ab53907a369e9f08818e008
|
[] |
no_license
|
ashish-bisht/Ds-and-Algo
|
006f7412a8b20cced7da261452864a09edd4b4db
|
a042127b5b51bcb632e5e0bf08b130b554f10c3f
|
refs/heads/master
| 2020-09-16T03:10:19.648329
| 2020-04-18T08:55:26
| 2020-04-18T08:55:26
| 223,632,306
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
import heapq
def kth_largest(nums, k):
return sorted(nums)[-k]
nums = [3, 2, 1, 5, 6, 4]
k = 2
print(kth_largest(nums, k))
def kth_largest_heap(nums, k):
heap = []
for num in nums:
heapq.heappush(heap, num)
for _ in range(len(nums)-k):
heapq.heappop(heap)
return heapq.heappop(heap)
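# note: heapq.nlargest(k, nums)[-1] gives the same result with a size-k heap,
# e.g. heapq.nlargest(2, [3, 2, 1, 5, 6, 4])[-1] == 5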
print(kth_largest_heap(nums, k))
|
[
"ashishbisht723@gmail.com"
] |
ashishbisht723@gmail.com
|
2ef0da07a4bdf9994bd84d2d88cd026c975b1d7e
|
c9ee283fe1e5a2538d990c4399bd1f4c4091a985
|
/wordcount/views.py
|
8af5cdf5cc80851de1043ebb2922fcd2edb43700
|
[] |
no_license
|
xgarbagelab/wordcount-project
|
5cefce5e00807c945e84f44b3c0c86cf96169ad5
|
abc9767c75b3bf7d0de98260817eaea75bf57d6a
|
refs/heads/master
| 2020-03-09T10:38:45.229805
| 2018-04-09T09:01:35
| 2018-04-09T09:01:35
| 128,741,740
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 675
|
py
|
from django.http import HttpResponse
from django.shortcuts import render
def homepage(request):
    #return HttpResponse('Hello')
return render(request,'home.html')
def count(request):
fulltext=request.GET['fulltext']
wordlist = fulltext.split()
word_dictionary={}
for word in wordlist:
if word in word_dictionary:
#Increase
word_dictionary[word]+=1
else:
#add to the dictionary
word_dictionary[word]=1
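    # note: collections.Counter(wordlist) would build the same word -> count mapping in one call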
return render(request,'count.html',{'fulltext':fulltext,'count':len(wordlist),'word_dictionary':word_dictionary.items})
def about(request):
return render(request,'about.html')
|
[
"madhusudan707@gmail.com"
] |
madhusudan707@gmail.com
|
45cead8da98c2c33ffdf9692f4f051e3884b5fb6
|
d231cb5d54cb14bc4b40d5ba56709f80a1f9d4c1
|
/WMass/python/postprocessing/scripts/copyDir.py
|
a42a37dc1bdce2a94dc311a5e937a05cf2b180ba
|
[] |
no_license
|
WMass/cmgtools-lite
|
179d7083878b8cb1de14835b8c0a76a51bfd6a1e
|
025b7ec338ee7f574ad802a958ec6214995c2ac9
|
refs/heads/wmass94X
| 2022-07-03T13:09:16.634404
| 2020-08-31T06:39:42
| 2020-08-31T06:39:42
| 108,602,987
| 0
| 8
| null | 2022-02-04T23:04:41
| 2017-10-27T22:56:41
|
Python
|
UTF-8
|
Python
| false
| false
| 2,083
|
py
|
#!/usr/bin/env python
import os, sys
import re
if __name__ == "__main__":
from optparse import OptionParser
parser = OptionParser(usage="%prog [options] inputDir outputDir")
parser.add_option("-p", "--pretend", dest="pretend", action="store_true", default=False, help="Don't run anything");
parser.add_option("-v", "--verbose", dest="verbose", action="store_true", default=False, help="Use option -v for cp");
parser.add_option("-m", "--match", dest="match", type="string", default=None, help="Select only folders matching this regular expression")
parser.add_option("-r", "--replace", dest="replace", type="string", nargs=2, default=None, help="Replace first argument with second argument when copying folder or file")
(options, args) = parser.parse_args()
if len(args)<2:
parser.print_help()
sys.exit(1)
inputdir = args[0]
outputDir = args[1]
if inputdir == outputDir:
print "Error: input folder is the same as the output folder."
quit()
print "-"*30
print "Copying folders from inputdir %s to outputDir %s" % (inputdir, outputDir)
if options.match != None:
print "Selecting folders matching '{reg}'".format(reg=options.match)
if options.replace != None:
print "Changing name replacing '%s' with '%s'" % (options.replace[0], options.replace[1])
print "-"*30
if not os.path.exists(outputDir):
print "Warning: folder {out} does not exist".format(out=outputDir)
print "It will be created now"
os.makedirs(outputDir)
folders = os.listdir(inputdir)
for fld in folders:
if options.match == None or re.match(options.match,fld):
cmd = "cp -r{v} {ind}/{f} {out}/{newf}".format(v="v" if options.verbose else "",
ind=inputdir,f=fld, out=outputDir,
newf=fld if options.replace == None else fld.replace(options.replace[0], options.replace[1]))
if options.pretend:
print cmd
else:
os.system(cmd)
print "-"*30
print "DONE"
print "-"*30
|
[
"cippy91@gmail.com"
] |
cippy91@gmail.com
|
514ad159680cee8af058ad18c3d94bc9d7b281b8
|
6656aa66a16ae34bdb967dd502aadedf27dc2ddb
|
/file/palindrome.py
|
668a20b11822bc6f0c9254c631ad29a2ec24ba6b
|
[] |
no_license
|
Lucidhomme/Python
|
a7e654ba2b1abeea48a15d22d976681d3cdb40e5
|
b51ba56c0d74c9d820260885f1f5af04cdd99d9d
|
refs/heads/main
| 2022-12-24T16:34:28.336819
| 2020-10-09T15:08:48
| 2020-10-09T15:08:48
| 302,671,182
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,427
|
py
|
# create a palindrome module under the file package of the service package, and a palindrome caller module under the Python base
# palindrome
# a word or sentence that reads the same backwards as it does forwards
# level, sos, rotator, 'nurses run'
# we need a pivot: compare the first character with the last one
# loop
# //
# str = 'jslim9413'
# idx = len(str) // 2
# print(str[idx])
# function that checks whether a given word is a palindrome or not
def isPalindrome():
    word = input("Enter a word: ")
    isFlag = True
    for i in range(len(word) // 2 ) : # // drops the fractional part
if word[i] != word[-1 -i] :
isFlag = False
break
return isFlag
def reversePalindrome() :
    word = input("Enter a word: ")
# print(word == word[::-1])
# print( type(reversed(word)) )
# print(list(reversed(word)))
if list(word) == list(reversed(word)) :
print(True)
else :
print(False)
#Print each palindrome word on its own line from a file that stores one word per line. Note: words read from the file end with \n, so strip the \n before checking, and do not print extra blank lines between words
def reverse_check() :
with open('./etc. word/palindrome_words.txt', 'r') as file :
for i in file :
i = i.strip('\n')
if list(i) == list(reversed(i)) :
print(i)
## instructor's answer
def palindromeExc():
with open('./etc. word/palindrome_words.txt', 'r') as file :
for line in file :
line = line.strip('\n')
if line == line[::-1] :
print(line)
# N-gram
# extracting character-level 2-grams from 'hello'?
# he / el / ll / lo
# 0 1 2 3 4
# h e l l o
# text = 'hello'
# for i in range(len(text)-1) :
# print(text[i], text[i+1], sep = "")
# if you split the string on whitespace, what type do you get?
# a list - correct
# list 2-gram: print the current word together with the next word?
# text = 'this is python script'
# words = text.split()
# for i in range(len(words) -1) :
# print(words[i], words[i+1])
#zip()
number = [1,2,3,4]
name = ['a','b','c','d']
# number_name = dict(zip(number, name))
# print(number_name)
# dic = {}
# for number, name in zip(number, name):
# dic[number] = name
# print(dic)
# a = 'lim'
# b = [1,2,3]
# c = ('one','two','three')
# print(list(zip(a,b,c)))
# print(dict(zip(a,b)))
# print(set(zip(a,b,c)))
# read a sentence with the input function
# e.g. Python is a programming language that lets you work quickly
# read the number n for the n-gram with the input function as well
# e.g. 3
# print the word n-grams for the given number as tuples
# note: if the sentence has fewer words than the given integer, raise and handle an exception
def zipNgram():
n = int(input("gram : "))
text = input("sentences : ")
words = text.split()
try :
if (len(words) < n ) :
            raise Exception('the sentence must contain more words than the given n')
else :
for i in range(len(words) - n + 1) :
print(words[i:i+n])
except Exception as e:
print(e)
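# note: words[i:i+n] is a list slice, so the loop prints lists; wrap it in tuple(...)
# if tuples are really required, e.g. n=2 on "this is python" -> ('this', 'is'), ('is', 'python')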
|
[
"noreply@github.com"
] |
Lucidhomme.noreply@github.com
|
6d587149211c6466c6606dfa59659a94f16ff35b
|
75a9c9a3f0c92b31e4682c3b452ca528858b521c
|
/kEdge/TestRegistration.py
|
58eb5613777c0c6fd5ef24a21c74190c86c492e7
|
[] |
no_license
|
DoctorEmmetBrown/Pagaille
|
1ccf58db6895c0035d8d48a142fce808661b4883
|
443635cab835281cc8a13c92fe5e898a52f72bfb
|
refs/heads/master
| 2020-04-29T17:08:53.597371
| 2019-07-18T15:03:02
| 2019-07-18T15:03:02
| 176,288,407
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,241
|
py
|
__author__ = 'embrun'
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import register_translation
from skimage.feature.register_translation import _upsampled_dft
from scipy.ndimage import fourier_shift
image = data.camera()
shift = (-2.4, 1.32)
# (-2.4, 1.32) pixel offset relative to reference coin
offset_image = fourier_shift(np.fft.fftn(image), shift)
offset_image = np.fft.ifftn(offset_image)
print("Known offset (y, x):")
print(shift)
# pixel precision first
print 'Pixel Precision'
shift, error, diffphase = register_translation(image, offset_image,100)
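# note: the third argument is the upsample factor, so 100 already gives 1/100-pixel
# (subpixel) precision here despite the 'Pixel Precision' label above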
print 'Pixel Precision Done'
print 'Shift found'
print shift
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# View the output of a cross-correlation to show what the algorithm is
# doing behind the scenes
image_product = np.fft.fft2(image) * np.fft.fft2(offset_image).conj()
cc_image = np.fft.fftshift(np.fft.ifft2(image_product))
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Cross-correlation")
plt.show()
print("Detected pixel offset (y, x):")
print(shift)
# subpixel precision
shift, error, diffphase = register_translation(image, offset_image, 100)
fig = plt.figure(figsize=(8, 3))
ax1 = plt.subplot(1, 3, 1, adjustable='box-forced')
ax2 = plt.subplot(1, 3, 2, sharex=ax1, sharey=ax1, adjustable='box-forced')
ax3 = plt.subplot(1, 3, 3)
ax1.imshow(image)
ax1.set_axis_off()
ax1.set_title('Reference image')
ax2.imshow(offset_image.real)
ax2.set_axis_off()
ax2.set_title('Offset image')
# Calculate the upsampled DFT, again to show what the algorithm is doing
# behind the scenes. Constants correspond to calculated values in routine.
# See source code for details.
cc_image = _upsampled_dft(image_product, 150, 100, (shift*100)+75).conj()
ax3.imshow(cc_image.real)
ax3.set_axis_off()
ax3.set_title("Supersampled XC sub-area")
plt.show()
print("Detected subpixel offset (y, x):")
print(shift)
|
[
"ines.zaouak@gmail.com"
] |
ines.zaouak@gmail.com
|
a72647356ffc9aa6166402b5689ac88fec136576
|
b6fd722600334071987e5b25787d672d002d0d82
|
/restful_ajax/wsgi.py
|
169e87c6785a188c457fd1f55e3e4173bcd19c39
|
[
"MIT"
] |
permissive
|
Divinity360/VclinicApi
|
789be9c6d919d0985c9ce276d45556275579e5d7
|
7271e2143a1399aabad9e32c8538a29efe81d774
|
refs/heads/master
| 2022-12-09T21:36:15.752993
| 2020-02-05T22:03:51
| 2020-02-05T22:03:51
| 238,483,133
| 0
| 0
|
MIT
| 2022-12-08T06:18:30
| 2020-02-05T15:28:34
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 173
|
py
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "restful_ajax.settings")
application = get_wsgi_application()
|
[
"thedivinitysoft@gmail.com"
] |
thedivinitysoft@gmail.com
|
d205ecf93b379b1f946455216374e1b917657a8d
|
9d5e20547dbd675442f82952511506974b4c8e7b
|
/pyscf/pbc/df/mdf.py
|
fc784cffdbd7b54c06c749edb22f6b05e43347b6
|
[
"Apache-2.0"
] |
permissive
|
tyanderson176/pyscf
|
9072b68bf4c6544deec11cd259e67406bed6b2a6
|
a5c480338c4725d5ffb023e12266862bdee6b3aa
|
refs/heads/master
| 2020-08-22T17:05:37.115630
| 2019-10-20T23:59:23
| 2019-10-20T23:59:23
| 216,443,034
| 0
| 0
|
Apache-2.0
| 2019-10-20T23:48:44
| 2019-10-20T23:48:43
| null |
UTF-8
|
Python
| false
| false
| 17,446
|
py
|
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Gaussian and planewaves mixed density fitting
Ref:
J. Chem. Phys. 147, 164119 (2017)
'''
import os
import time
import tempfile
import numpy
import h5py
import scipy.linalg
from pyscf import lib
from pyscf.lib import logger
from pyscf.df.outcore import _guess_shell_ranges
from pyscf.pbc import tools
from pyscf.pbc import gto
from pyscf.pbc.df import outcore
from pyscf.pbc.df import ft_ao
from pyscf.pbc.df import df
from pyscf.pbc.df import aft
from pyscf.pbc.df.df import fuse_auxcell
from pyscf.pbc.df.df_jk import zdotNN, zdotCN, zdotNC
from pyscf.pbc.lib.kpts_helper import (is_zero, gamma_point, member, unique,
KPT_DIFF_TOL)
from pyscf.pbc.df import mdf_jk
from pyscf.pbc.df import mdf_ao2mo
from pyscf import __config__
# kpti == kptj: s2 symmetry
# kpti == kptj == 0 (gamma point): real
def _make_j3c(mydf, cell, auxcell, kptij_lst, cderi_file):
t1 = (time.clock(), time.time())
log = logger.Logger(mydf.stdout, mydf.verbose)
max_memory = max(2000, mydf.max_memory-lib.current_memory()[0])
fused_cell, fuse = fuse_auxcell(mydf, auxcell)
# Create swap file to avoid huge cderi_file. see also function
# pyscf.pbc.df.df._make_j3c
swapfile = tempfile.NamedTemporaryFile(dir=os.path.dirname(cderi_file))
fswap = lib.H5TmpFile(swapfile.name)
# Unlink swapfile to avoid trash
swapfile = None
outcore._aux_e2(cell, fused_cell, fswap, 'int3c2e', aosym='s2',
kptij_lst=kptij_lst, dataname='j3c-junk', max_memory=max_memory)
t1 = log.timer_debug1('3c2e', *t1)
nao = cell.nao_nr()
naux = auxcell.nao_nr()
mesh = mydf.mesh
Gv, Gvbase, kws = cell.get_Gv_weights(mesh)
b = cell.reciprocal_vectors()
gxyz = lib.cartesian_prod([numpy.arange(len(x)) for x in Gvbase])
ngrids = gxyz.shape[0]
kptis = kptij_lst[:,0]
kptjs = kptij_lst[:,1]
kpt_ji = kptjs - kptis
uniq_kpts, uniq_index, uniq_inverse = unique(kpt_ji)
log.debug('Num uniq kpts %d', len(uniq_kpts))
log.debug2('uniq_kpts %s', uniq_kpts)
# j2c ~ (-kpt_ji | kpt_ji)
j2c = fused_cell.pbc_intor('int2c2e', hermi=1, kpts=uniq_kpts)
for k, kpt in enumerate(uniq_kpts):
aoaux = ft_ao.ft_ao(fused_cell, Gv, None, b, gxyz, Gvbase, kpt).T
aoaux = fuse(aoaux)
coulG = mydf.weighted_coulG(kpt, False, mesh)
LkR = numpy.asarray(aoaux.real, order='C')
LkI = numpy.asarray(aoaux.imag, order='C')
j2c_k = fuse(fuse(j2c[k]).T).T.copy()
if is_zero(kpt): # kpti == kptj
j2c_k -= lib.dot(LkR*coulG, LkR.T)
j2c_k -= lib.dot(LkI*coulG, LkI.T)
else:
# aoaux ~ kpt_ij, aoaux.conj() ~ kpt_kl
j2cR, j2cI = zdotCN(LkR*coulG, LkI*coulG, LkR.T, LkI.T)
j2c_k -= j2cR + j2cI * 1j
fswap['j2c/%d'%k] = j2c_k
aoaux = LkR = LkI = j2cR = j2cI = coulG = None
j2c = None
def cholesky_decomposed_metric(uniq_kptji_id):
j2c = numpy.asarray(fswap['j2c/%d'%uniq_kptji_id])
j2c_negative = None
# Note large difference may be found in results between the CD/eig treatments.
# In some systems, small integral errors can lead to different treatments of
# linear dependency which can be observed in the total energy/orbital energy
# around 4th decimal place.
# try:
# j2c = scipy.linalg.cholesky(j2c, lower=True)
# j2ctag = 'CD'
# except scipy.linalg.LinAlgError as e:
#
# Abandon CD treatment for better numerical stablity
w, v = scipy.linalg.eigh(j2c)
log.debug('MDF metric for kpt %s cond = %.4g, drop %d bfns',
uniq_kptji_id, w[-1]/w[0], numpy.count_nonzero(w<mydf.linear_dep_threshold))
v1 = v[:,w>mydf.linear_dep_threshold].T.conj()
v1 /= numpy.sqrt(w[w>mydf.linear_dep_threshold]).reshape(-1,1)
j2c = v1
if cell.dimension == 2 and cell.low_dim_ft_type != 'inf_vacuum':
idx = numpy.where(w < -mydf.linear_dep_threshold)[0]
if len(idx) > 0:
j2c_negative = (v[:,idx]/numpy.sqrt(-w[idx])).conj().T
j2ctag = 'eig'
return j2c, j2c_negative, j2ctag
feri = h5py.File(cderi_file, 'a')
feri['j3c-kptij'] = kptij_lst
nsegs = len(fswap['j3c-junk/0'])
def make_kpt(uniq_kptji_id, cholesky_j2c): # kpt = kptj - kpti
kpt = uniq_kpts[uniq_kptji_id]
log.debug1('kpt = %s', kpt)
adapted_ji_idx = numpy.where(uniq_inverse == uniq_kptji_id)[0]
adapted_kptjs = kptjs[adapted_ji_idx]
nkptj = len(adapted_kptjs)
log.debug1('adapted_ji_idx = %s', adapted_ji_idx)
j2c, j2c_negative, j2ctag = cholesky_j2c
Gaux = ft_ao.ft_ao(fused_cell, Gv, None, b, gxyz, Gvbase, kpt).T
Gaux = fuse(Gaux)
Gaux *= mydf.weighted_coulG(kpt, False, mesh)
kLR = Gaux.T.real.copy('C')
kLI = Gaux.T.imag.copy('C')
if is_zero(kpt): # kpti == kptj
aosym = 's2'
nao_pair = nao*(nao+1)//2
if cell.dimension == 3:
vbar = fuse(mydf.auxbar(fused_cell))
ovlp = cell.pbc_intor('int1e_ovlp', hermi=1, kpts=adapted_kptjs)
ovlp = [lib.pack_tril(s) for s in ovlp]
else:
aosym = 's1'
nao_pair = nao**2
mem_now = lib.current_memory()[0]
log.debug2('memory = %s', mem_now)
max_memory = max(2000, mydf.max_memory-mem_now)
# nkptj for 3c-coulomb arrays plus 1 Lpq array
buflen = min(max(int(max_memory*.38e6/16/naux/(nkptj+1)), 1), nao_pair)
shranges = _guess_shell_ranges(cell, buflen, aosym)
buflen = max([x[2] for x in shranges])
# +1 for a pqkbuf
if aosym == 's2':
Gblksize = max(16, int(max_memory*.1e6/16/buflen/(nkptj+1)))
else:
Gblksize = max(16, int(max_memory*.2e6/16/buflen/(nkptj+1)))
Gblksize = min(Gblksize, ngrids, 16384)
pqkRbuf = numpy.empty(buflen*Gblksize)
pqkIbuf = numpy.empty(buflen*Gblksize)
# buf for ft_aopair
buf = numpy.empty((nkptj,buflen*Gblksize), dtype=numpy.complex128)
def pw_contract(istep, sh_range, j3cR, j3cI):
bstart, bend, ncol = sh_range
if aosym == 's2':
shls_slice = (bstart, bend, 0, bend)
else:
shls_slice = (bstart, bend, 0, cell.nbas)
for p0, p1 in lib.prange(0, ngrids, Gblksize):
dat = ft_ao._ft_aopair_kpts(cell, Gv[p0:p1], shls_slice, aosym,
b, gxyz[p0:p1], Gvbase, kpt,
adapted_kptjs, out=buf)
nG = p1 - p0
for k, ji in enumerate(adapted_ji_idx):
aoao = dat[k].reshape(nG,ncol)
pqkR = numpy.ndarray((ncol,nG), buffer=pqkRbuf)
pqkI = numpy.ndarray((ncol,nG), buffer=pqkIbuf)
pqkR[:] = aoao.real.T
pqkI[:] = aoao.imag.T
lib.dot(kLR[p0:p1].T, pqkR.T, -1, j3cR[k], 1)
lib.dot(kLI[p0:p1].T, pqkI.T, -1, j3cR[k], 1)
if not (is_zero(kpt) and gamma_point(adapted_kptjs[k])):
lib.dot(kLR[p0:p1].T, pqkI.T, -1, j3cI[k], 1)
lib.dot(kLI[p0:p1].T, pqkR.T, 1, j3cI[k], 1)
for k, ji in enumerate(adapted_ji_idx):
if is_zero(kpt) and gamma_point(adapted_kptjs[k]):
v = j3cR[k]
else:
v = j3cR[k] + j3cI[k] * 1j
if j2ctag == 'CD':
v = scipy.linalg.solve_triangular(j2c, v, lower=True, overwrite_b=True)
feri['j3c/%d/%d'%(ji,istep)] = v
else:
feri['j3c/%d/%d'%(ji,istep)] = lib.dot(j2c, v)
# low-dimension systems
if j2c_negative is not None:
feri['j3c-/%d/%d'%(ji,istep)] = lib.dot(j2c_negative, v)
with lib.call_in_background(pw_contract) as compute:
col1 = 0
for istep, sh_range in enumerate(shranges):
log.debug1('int3c2e [%d/%d], AO [%d:%d], ncol = %d', \
istep+1, len(shranges), *sh_range)
bstart, bend, ncol = sh_range
col0, col1 = col1, col1+ncol
j3cR = []
j3cI = []
for k, idx in enumerate(adapted_ji_idx):
v = [fswap['j3c-junk/%d/%d'%(idx,i)][0,col0:col1].T for i in range(nsegs)]
v = fuse(numpy.vstack(v))
if is_zero(kpt) and cell.dimension == 3:
for i in numpy.where(vbar != 0)[0]:
v[i] -= vbar[i] * ovlp[k][col0:col1]
j3cR.append(numpy.asarray(v.real, order='C'))
if is_zero(kpt) and gamma_point(adapted_kptjs[k]):
j3cI.append(None)
else:
j3cI.append(numpy.asarray(v.imag, order='C'))
v = None
compute(istep, sh_range, j3cR, j3cI)
for ji in adapted_ji_idx:
del(fswap['j3c-junk/%d'%ji])
# Wrapped around boundary and symmetry between k and -k can be used
# explicitly for the metric integrals. We consider this symmetry
# because it is used in the df_ao2mo module when contracting two 3-index
# integral tensors to the 4-index 2e integral tensor. If the symmetry
# related k-points are treated separately, the resultant 3-index tensors
# may have inconsistent dimension due to the numerial noise when handling
# linear dependency of j2c.
def conj_j2c(cholesky_j2c):
j2c, j2c_negative, j2ctag = cholesky_j2c
if j2c_negative is None:
return j2c.conj(), None, j2ctag
else:
return j2c.conj(), j2c_negative.conj(), j2ctag
a = cell.lattice_vectors() / (2*numpy.pi)
def kconserve_indices(kpt):
'''search which (kpts+kpt) satisfies momentum conservation'''
kdif = numpy.einsum('wx,ix->wi', a, uniq_kpts + kpt)
kdif_int = numpy.rint(kdif)
mask = numpy.einsum('wi->i', abs(kdif - kdif_int)) < KPT_DIFF_TOL
uniq_kptji_ids = numpy.where(mask)[0]
return uniq_kptji_ids
done = numpy.zeros(len(uniq_kpts), dtype=bool)
for k, kpt in enumerate(uniq_kpts):
if done[k]:
continue
log.debug1('Cholesky decomposition for j2c at kpt %s', k)
cholesky_j2c = cholesky_decomposed_metric(k)
# The k-point k' which has (k - k') * a = 2n pi. Metric integrals have the
# symmetry S = S
uniq_kptji_ids = kconserve_indices(-kpt)
log.debug1("Symmetry pattern (k - %s)*a= 2n pi", kpt)
log.debug1(" make_kpt for uniq_kptji_ids %s", uniq_kptji_ids)
for uniq_kptji_id in uniq_kptji_ids:
if not done[uniq_kptji_id]:
make_kpt(uniq_kptji_id, cholesky_j2c)
done[uniq_kptji_ids] = True
# The k-point k' which has (k + k') * a = 2n pi. Metric integrals have the
# symmetry S = S*
uniq_kptji_ids = kconserve_indices(kpt)
log.debug1("Symmetry pattern (k + %s)*a= 2n pi", kpt)
log.debug1(" make_kpt for %s", uniq_kptji_ids)
cholesky_j2c = conj_j2c(cholesky_j2c)
for uniq_kptji_id in uniq_kptji_ids:
if not done[uniq_kptji_id]:
make_kpt(uniq_kptji_id, cholesky_j2c)
done[uniq_kptji_ids] = True
feri.close()
# valence_exp = 1. are typically the Gaussians in the valence
VALENCE_EXP = getattr(__config__, 'pbc_df_mdf_valence_exp', 1.0)
def _mesh_for_valence(cell, valence_exp=VALENCE_EXP):
'''Energy cutoff estimation'''
precision = cell.precision * 10
Ecut_max = 0
for i in range(cell.nbas):
l = cell.bas_angular(i)
es = cell.bas_exp(i).copy()
es[es>valence_exp] = valence_exp
cs = abs(cell.bas_ctr_coeff(i)).max(axis=1)
ke_guess = gto.cell._estimate_ke_cutoff(es, l, cs, precision)
Ecut_max = max(Ecut_max, ke_guess.max())
mesh = tools.cutoff_to_mesh(cell.lattice_vectors(), Ecut_max)
mesh = numpy.min((mesh, cell.mesh), axis=0)
if cell.dimension < 2 or cell.low_dim_ft_type == 'inf_vacuum':
mesh[cell.dimension:] = cell.mesh[cell.dimension:]
return mesh
del(VALENCE_EXP)
class MDF(df.DF):
'''Gaussian and planewaves mixed density fitting
'''
def __init__(self, cell, kpts=numpy.zeros((1,3))):
self.cell = cell
self.stdout = cell.stdout
self.verbose = cell.verbose
self.max_memory = cell.max_memory
self.kpts = kpts # default is gamma point
self.kpts_band = None
self._auxbasis = None
self.mesh = _mesh_for_valence(cell)
# In MDF, fitting PWs (self.mesh), and parameters eta and exp_to_discard
# are related to each other. The compensated function does not need to
# be very smooth. It just needs to be expanded by the specified PWs
# (self.mesh). self.eta is estimated on the fly based on the value of
# self.mesh.
self.eta = None
# Any functions which are more diffused than the compensated Gaussian
# are linearly dependent to the PWs. They can be removed from the
# auxiliary set without affecting the accuracy of MDF. exp_to_discard
# can be set to the value of self.eta
self.exp_to_discard = None
# The following attributes are not input options.
self.exxdiv = None # to mimic KRHF/KUHF object in function get_coulG
self.auxcell = None
self.blockdim = getattr(__config__, 'df_df_DF_blockdim', 240)
self.linear_dep_threshold = df.LINEAR_DEP_THR
self._j_only = False
# If _cderi_to_save is specified, the 3C-integral tensor will be saved in this file.
self._cderi_to_save = tempfile.NamedTemporaryFile(dir=lib.param.TMPDIR)
# If _cderi is specified, the 3C-integral tensor will be read from this file
self._cderi = None
self._keys = set(self.__dict__.keys())
@property
def eta(self):
if self._eta is not None:
return self._eta
else:
cell = self.cell
if cell.dimension == 0:
return 0.2
ke_cutoff = tools.mesh_to_cutoff(cell.lattice_vectors(), self.mesh)
ke_cutoff = ke_cutoff[:cell.dimension].min()
return aft.estimate_eta_for_ke_cutoff(cell, ke_cutoff, cell.precision)
@eta.setter
def eta(self, x):
self._eta = x
@property
def exp_to_discard(self):
if self._exp_to_discard is not None:
return self._exp_to_discard
else:
return self.eta
@exp_to_discard.setter
def exp_to_discard(self, x):
self._exp_to_discard = x
_make_j3c = _make_j3c
# Note: Special exxdiv by default should not be used for an arbitrary
# input density matrix. When the df object was used with the molecular
# post-HF code, get_jk was often called with an incomplete DM (e.g. the
# core DM in CASCI). An SCF level exxdiv treatment is inadequate for
# post-HF methods.
def get_jk(self, dm, hermi=1, kpts=None, kpts_band=None,
with_j=True, with_k=True, exxdiv=None):
if kpts is None:
if numpy.all(self.kpts == 0):
# Gamma-point calculation by default
kpts = numpy.zeros(3)
else:
kpts = self.kpts
kpts = numpy.asarray(kpts)
if kpts.shape == (3,):
return mdf_jk.get_jk(self, dm, hermi, kpts, kpts_band, with_j,
with_k, exxdiv)
vj = vk = None
if with_k:
vk = mdf_jk.get_k_kpts(self, dm, hermi, kpts, kpts_band, exxdiv)
if with_j:
vj = mdf_jk.get_j_kpts(self, dm, hermi, kpts, kpts_band)
return vj, vk
get_eri = get_ao_eri = mdf_ao2mo.get_eri
ao2mo = get_mo_eri = mdf_ao2mo.general
ao2mo_7d = mdf_ao2mo.ao2mo_7d
def update_mp(self):
pass
def update_cc(self):
pass
def update(self):
pass
################################################################################
# With this function to mimic the molecular DF.loop function, the pbc gamma
# point DF object can be used in the molecular code
def loop(self, blksize=None):
for dat in aft.AFTDF.loop(self, blksize):
yield dat
for dat in df.DF.loop(self, blksize):
yield dat
def get_naoaux(self):
return df.DF.get_naoaux(self) + aft.AFTDF.get_naoaux(self)
|
[
"osirpt.sun@gmail.com"
] |
osirpt.sun@gmail.com
|
0dc2326977bac2dd04182a4f66eed643b31298e0
|
c7feb89550b180fb67f0d9dd0de342e271ee2f94
|
/src/metric.py
|
3caa2ff76773b28a59d02920424a79aabcec0496
|
[] |
no_license
|
nielsarts/energymonitoring
|
b8cb94f106ecdbfd7a36649c02d8236d9e08be01
|
0b9070069f8cada5b08b399e66b4e6916f3ef552
|
refs/heads/master
| 2020-05-01T16:35:17.629262
| 2018-08-11T22:40:44
| 2018-08-11T22:40:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,964
|
py
|
#!/usr/bin/python
# software based on
# TP-Link Wi-Fi Smart Plug Protocol Client
# For use with TP-Link HS-100 or HS-110
#
# by Lubomir Stroetmann
# Copyright 2016 softScheck GmbH
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# extended for prototyping purposes by Ed Kuijpers with support from French
# students during a summerschool
from __future__ import division
from prometheus_client import start_http_server, Summary, Gauge
from subprocess import PIPE, Popen
import random
import time
import psutil
import matplotlib.pyplot as plt
import json
import socket
import argparse
from struct import pack
version = 0.2
# Check if hostname is valid
def validHostname(hostname):
try:
socket.gethostbyname(hostname)
except socket.error:
parser.error("Invalid hostname.")
return hostname
# Predefined Smart Plug Commands
# For a full list of commands, consult tplink_commands.txt
commands = {
'info': '{"system":{"get_sysinfo":{}}}',
'on': '{"system":{"set_relay_state":{"state":1}}}',
'off': '{"system":{"set_relay_state":{"state":0}}}',
'cloudinfo': '{"cnCloud":{"get_info":{}}}',
'wlanscan': '{"netif":{"get_scaninfo":{"refresh":0}}}',
'time': '{"time":{"get_time":{}}}',
'schedule': '{"schedule":{"get_rules":{}}}',
'countdown': '{"count_down":{"get_rules":{}}}',
'antitheft': '{"anti_theft":{"get_rules":{}}}',
'reboot': '{"system":{"reboot":{"delay":1}}}',
'reset': '{"system":{"reset":{"delay":1}}}',
'energy': '{"emeter":{"get_realtime":{}}}'
}
# Encryption and Decryption of TP-Link Smart Home Protocol
# XOR Autokey Cipher with starting key = 171
def encrypt(string):
key = 171
result = pack('>I', len(string))
for i in string:
a = key ^ ord(i)
key = a
result += chr(a)
return result
def decrypt(string):
key = 171
result = ""
for i in string:
a = key ^ ord(i)
key = ord(i)
result += chr(a)
return result
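# autokey scheme: each plaintext byte is XOR'd with the previous ciphertext byte
# (key starts at 171), and encrypt() prefixes the payload with a 4-byte big-endian length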
# Parse commandline arguments
parser = argparse.ArgumentParser(
description="TP-Link Wi-Fi Smart Plug Client v" + str(version))
parser.add_argument(
"-t",
"--target",
metavar="<hostname>",
required=True,
help="Target hostname or IP address",
type=validHostname)
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument(
"-c",
"--command",
metavar="<command>",
help="Preset command to send. Choices are: " + ", ".join(commands),
choices=commands)
group.add_argument(
"-j",
"--json",
metavar="<JSON string>",
help="Full JSON string of command to send")
args = parser.parse_args()
# Set target IP, port and command to send
ip = args.target
port = 9999
if args.command is None:
cmd = args.json
cmd = commands['energy']
else:
cmd = commands[args.command]
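# note: when no -c command is given, the energy query above overrides args.json,
# so the -j option is effectively a no-op in this prototype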
def get_energy(cmd):
# Send command and receive reply
energy = 0.0
try:
sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock_tcp.connect((ip, port))
sock_tcp.send(encrypt(cmd))
data = sock_tcp.recv(2048)
sock_tcp.close()
# print "Sent: ", cmd
# print "Received: ", decrypt(data[4:])
decode = decrypt(data[4:])
ddecode = json.loads(decode)
energy = 0.001 * float(ddecode['emeter']['get_realtime']['power_mw'])
print "energy = %f" % energy
except socket.error:
        quit("Could not connect to host " + ip + ":" + str(port))
return (energy)
REQUEST_TIME = Summary('request_processing_seconds',
'Time spent processing request')
g4 = Gauge('HS110_Energy', 'Description of gauge')
def get_cpu_temperature():
process = Popen(['./sensor_par.py', 'Physical id 0'], stdout=PIPE)
output, _error = process.communicate()
print 'temp = %s' % (output[output.index('=') + 1:-1 + output.rindex("")])
return float(output[output.index('=') + 1:-1 + output.rindex("")])
g3 = Gauge('Temperature', 'Description of gauge')
g3.set(get_cpu_temperature())
# Decorate function with metric.
@REQUEST_TIME.time()
def process_request(t):
"""A dummy function that takes some time."""
time.sleep(t)
if __name__ == '__main__':
# Start up the server to expose the metrics.
start_http_server(8000)
# Generate some requests.
while True:
process_request(random.random())
g4.set(get_energy(cmd))
g3.set(get_cpu_temperature())
|
[
"e.a.kuijpers@hva.nl"
] |
e.a.kuijpers@hva.nl
|
a9b05d089d341d0a3b1eab6aff004a19bb131807
|
9972d0d7f5678963344da089f5cdeb033a97db46
|
/Py/pg_karpathy_keras_kreplication.py
|
58a19b28a746a957e7a7dd346817411dee673e05
|
[] |
no_license
|
blakecc/RLworkingFolder
|
e00fbc7ee60295ad7ea47ebb46f6451a24766982
|
85affc56809ab9616b4fa4111c627e4357a0a9a2
|
refs/heads/master
| 2021-09-07T22:41:40.258030
| 2018-03-02T11:03:51
| 2018-03-02T11:03:51
| 122,582,057
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 12,438
|
py
|
import numpy as np
import _pickle as pickle
import gym
import os
from keras import layers
from keras.models import Model
from keras import backend as K
from keras import utils as np_utils
from keras import optimizers
# from keras.layers import advanced_activations
# from keras.layers.convolutional import Conv2D
# from keras.layers.pooling import MaxPooling2D
# from keras.layers.normalization import BatchNormalization
# from tensorflow.nn import l2_normalize
# import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.initializers import glorot_uniform
# hyperparameters
batchSize = 2048 # every how many episodes to do a param update?
learning_rate = 1e-4
gamma = 0.99 # discount factor for reward
decay_rate = 0.99 # decay factor for RMSProp leaky sum of grad^2
epsilon = 1e-5
resume = False # resume from previous checkpoint?
render = False
ep_limit = 300001
k_hidden_dims = 200
game_history = []
D = 80 * 80 # input dimensionality: 80x80 grid
def prepro(I):
""" prepro 210x160x3 uint8 frame into 6400 (80x80) 1D float vector """
I = I[35:195] # crop
I = I[::2,::2,0] # downsample by factor of 2
I[I == 144] = 0 # erase background (background type 1)
I[I == 109] = 0 # erase background (background type 2)
I[I != 0] = 1 # everything else (paddles, ball) just set to 1
return I.astype(np.float).ravel()
# test_obs = prepro(observation)
#
# test_obs.shape
#
# from matplotlib import pyplot as plt
# plt.imshow(test_obs, interpolation='nearest')
# plt.show()
def discount_rewards(r):
""" take 1D float array of rewards and compute discounted reward """
discounted_r = np.zeros_like(r)
running_add = 0
for t in reversed(range(0, r.size)):
if r[t] != 0: running_add = 0 # reset the sum, since this was a game boundary (pong specific!)
running_add = running_add * gamma + r[t]
discounted_r[t] = running_add
return discounted_r
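# e.g. with gamma = 0.99, rewards [0, 0, 1] become [0.9801, 0.99, 1.0]; the running
# sum resets at each nonzero reward because that marks a game boundary in Pong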
# def build_network(input_dim, output_dim, hidden_dims=[32, 32], batch_size = 8):
# """Create a base network"""
#
# visible = layers.Input(shape=(input_dim,))
# # visible = layers.Input(batch_shape = (None, input_dim[0], input_dim[1], input_dim[2]))
# # bnorm1 = BatchNormalization()(visible)
# # conv1 = Conv2D(8, kernel_size=8, activation='relu', padding = "same")(visible)
# # pool1 = MaxPooling2D(pool_size=(4, 4), padding = "same")(conv1)
# # net = Conv2D(16, kernel_size=4, activation='relu')(net)
# # net = MaxPooling2D(pool_size=(2, 2))(net)
#
# # flat1 = layers.Flatten()(pool1)
#
# hidden1 = layers.Dense(200)(visible)
# # relu1 = layers.advanced_activations.LeakyReLU()(hidden1)
# relu1 = layers.Activation("relu")(hidden1)
#
# # for h_dim in hidden_dims:
# # net = layers.Dense(h_dim)(net)
# # net = layers.advanced_activations.LeakyReLU()(net)
# # # net = layers.Activation("relu")(net)
# # # net = layers.Dropout(rate = 0.2)(net)
#
# output = layers.Dense(output_dim, activation = "softmax")(relu1)
# # net = layers.Activation("softmax")(net)
#
# model = Model(inputs=visible, outputs=output)
#
# return model
# def customLoss(y_true, y_pred):
# # TODO: change 2 to an automatic function
# action_1h = K.one_hot(K.argmax(K.abs(y_true), axis = 1), 2)
# y_hat = action_1h * y_pred
# sy_hat = K.sum(y_hat, axis = 1)
# ly_hat = K.log(sy_hat)
# ly_hat = K.cast(ly_hat, dtype = 'float32')
# depr = K.sum(y_true, axis = 1)
# depr = K.cast(depr, dtype = 'float32')
# loss = -1 * ly_hat * depr
# losssum = K.sum(loss, axis = 0)
# return losssum
def customLoss(y_true, y_pred):
loss = -1 * K.mean(y_true * K.log(y_pred))
return loss
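# y_true here is the one-hot action scaled by the discounted advantage (fake_labels
# below), so the mean of y_true * log(y_pred) is a policy-gradient surrogate loss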
# action_1h = K.one_hot(K.argmax(K.abs(fake_labels), axis = 1), 2)
# y_pred = kmodel.predict(epx)
# y_hat = action_1h * y_pred
# sy_hat = K.sum(y_hat, axis = 1)
# ly_hat = K.log(sy_hat)
# depr = K.sum(fake_labels, axis = 1)
# depr = K.cast(depr, dtype = 'float32')
# loss = -1 * ly_hat * depr
# losssum = K.sum(loss, axis = 0)
def build_network(input_dim, output_dim, hidden_dims=k_hidden_dims, lrate = learning_rate, drate = decay_rate, eps = epsilon):
"""Create a base network"""
model = Sequential()
model.add(Dense(hidden_dims, kernel_initializer = glorot_uniform(), activation='relu', input_dim=input_dim))
model.add(Dense(output_dim, kernel_initializer = glorot_uniform(), activation='softmax'))
rmsprop = optimizers.RMSprop(lr = lrate, rho = drate, epsilon = eps) #Rho is actually what Karpathy thinks of as decay ... in keras decay is the learning rate decay which is not relevant for us
model.compile(optimizer = rmsprop,
loss = customLoss,
metrics = ['acc'])
return model
# def build_train_fn(model, output_dim):
# """Create a train function
#
# It replaces `model.fit(X, y)` because we use the output of model and use it for training.
#
# For example, we need action placeholder
# called `action_one_hot` that stores, which action we took at state `s`.
# Hence, we can update the same action.
#
# This function will create
# `self.train_fn([state, action_one_hot, discount_reward])`
# which would train the model.
#
# """
# action_prob_placeholder = model.output
# action_onehot_placeholder = K.placeholder(shape=(None, output_dim),
# name="action_onehot")
# discount_reward_placeholder = K.placeholder(shape=(None,1),
# name="discount_reward")
#
# action_prob = K.sum(action_prob_placeholder * action_onehot_placeholder, axis=1)
# log_action_prob = K.log(action_prob)
#
# loss = - log_action_prob * discount_reward_placeholder
# loss = K.mean(loss)
#
# # TODO: adjust parameters more in line with Karpathy
# rmsprop1 = optimizers.RMSprop()
#
# updates = rmsprop1.get_updates(params=model.trainable_weights,
# # constraints=[],
# loss=loss)
#
# train_fn = K.function(inputs=[model.input,
# action_onehot_placeholder,
# discount_reward_placeholder],
# outputs=[],
# updates=updates)
# return train_fn
env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None # used in computing the difference frame
xs,drs,acts = [],[],[]
running_reward = None
# k_input_shape = np.expand_dims(prepro(observation), axis = 2).shape
k_input_shape = prepro(observation).shape[0]
# k_output_shape = env.action_space.n
k_output_shape = 2
# env.action_space.n
if resume:
kmodel = build_network(input_dim = k_input_shape, output_dim = k_output_shape, hidden_dims = k_hidden_dims)
kmodel.load_weights('save_k_01.h5')
game_history = pickle.load(open('game_history_k_01.p', 'rb'))
episode_number = game_history[-1][0]
reward_sum = 0
else:
episode_number = 0
reward_sum = 0
game_history = []
kmodel = build_network(input_dim = k_input_shape, output_dim = k_output_shape, hidden_dims = k_hidden_dims)
# env.render()
# train_func = build_train_fn(kmodel, k_output_shape)
# done = False
while True:
# while not done:
if render: env.render()
# for _ in range(10):
# preprocess the observation, set input to network to be difference image
cur_x = prepro(observation)
x = cur_x - prev_x if prev_x is not None else np.zeros(k_input_shape)
x = np.expand_dims(x, axis = 0)
prev_x = cur_x
# prev_x = None
# x.shape
# np.expand_dims(x, axis = 2)
# test_x = np.expand_dims(np.expand_dims(x, axis = 2), axis = 0)
# test_x = np.expand_dims(np.expand_dims(x, axis = 0), axis = 0)
# np.squeeze(x)
# aprob.shape
# test_x.shape
# kmodel.summary()
# forward the policy network and sample an action from the returned probability
# aprob = np.squeeze(kmodel.predict(np.expand_dims(np.expand_dims(x, axis = 0), axis = 0)))
aprob = np.squeeze(kmodel.predict(x))
# exploration vs. exploitation
action = np.random.choice(np.arange(k_output_shape), p=aprob)
action_onehot = np_utils.to_categorical(action, num_classes=k_output_shape)
# record various intermediates (needed later for backprop)
xs.append(x) # observation
acts.append(action_onehot)
# step the environment and get new measurements
observation, reward, done, info = env.step(action + 2)
reward_sum += reward
drs.append(reward) # record reward (has to be done after we call step() to get reward for previous action)
# endvar = 0
#
# import matplotlib.pyplot as plt
#
# endvar = 0
if done: # an episode finished
game_history.append((episode_number, reward_sum))
episode_number += 1
pickle.dump(game_history, open('game_history_k_01.p', 'wb'))
# stack together all inputs, hidden states, action gradients, and rewards for this episode
# epx = np.vstack(xs)
#eph = np.vstack(hs)
# epdlogp = np.vstack(dlogps)
# epr = np.vstack(drs)
# # compute the discounted reward backwards through time
# discounted_epr = discount_rewards(epr)
# # # standardize the rewards to be unit normal (helps control the gradient estimator variance)
# discounted_epr -= np.mean(discounted_epr)
# discounted_epr /= np.std(discounted_epr)
#
# epdlogp *= discounted_epr # modulate the gradient with advantage (PG magic happens right here.)
# eacts = np.vstack(acts)
#
# plt.plot(eacts[:,1])
# plt.show()
#
# epx.shape
# epx[0].sum()
#
# eacts[:,1].sum()
# batchTotal = epx.shape[0]
# batchStart = 0
# if batchTotal > batchSize:
# batchSteps = int(np.floor(batchTotal / batchSize))
# for num_steps in range(batchSteps):
# # print(eacts[batchStart:(batchStart + batchSize)])
# train_func([epx[batchStart:(batchStart + batchSize)], eacts[batchStart:(batchStart + batchSize)], discounted_epr[batchStart:(batchStart + batchSize)]])
# batchStart +=batchSize
# if batchSize < batchTotal:
# # print(eacts[batchStart:batchTotal])
# train_func([epx[batchStart:batchTotal], eacts[batchStart:batchTotal], discounted_epr[batchStart:batchTotal]])
#
# if batchTotal > batchSize:
# batchSteps = int(np.floor(batchTotal / batchSize))
# for num_steps in range(batchSteps):
# batch_obs = np.random.choice(range(batchTotal), batchSize)
# # print(eacts[batchStart:(batchStart + batchSize)])
# train_func([epx[batch_obs,], eacts[batch_obs,], discounted_epr[batch_obs,]])
# # batchStart +=batchSize
# # if batchSize < batchTotal:
# # # print(eacts[batchStart:batchTotal])
# # train_func([epx[batchStart:batchTotal], eacts[batchStart:batchTotal], discounted_epr[batchStart:batchTotal]])
# boring book-keeping
running_reward = reward_sum if running_reward is None else running_reward * 0.99 + reward_sum * 0.01
print('resetting env. episode {} reward total was {}. running mean: {}'.format(episode_number, reward_sum, running_reward))
# train_func([epx, eacts, discounted_epr])
if episode_number % 10 == 0:
epx = np.vstack(xs)
epr = np.vstack(drs)
discounted_epr = discount_rewards(epr)
discounted_epr -= np.mean(discounted_epr)
discounted_epr /= np.std(discounted_epr)
eacts = np.vstack(acts)
fake_labels = eacts * discounted_epr
kmodel.fit(epx, fake_labels, epochs = 1, batch_size=batchSize, verbose = False)
xs,drs,acts = [],[],[] # reset array memory
if episode_number % 20 == 0:
check_weight_1 = np.asarray(kmodel.get_weights()[1])[0].sum()
check_weight_2 = np.asarray(kmodel.get_weights()[3])[0].sum()
print('Weight check 1: {}, weight check 2: {}'.format(check_weight_1, check_weight_2))
if episode_number % 100 == 0: kmodel.save_weights('save_k_01.h5')
reward_sum = 0
observation = env.reset() # reset env
prev_x = None
# if reward != 0: # Pong has either +1 or -1 reward exactly when game ends.
# print ('ep {}: game finished, reward: {}'.format(episode_number, reward) + ('' if reward == -1 else ' !!!!!!!!'))
if episode_number == ep_limit:
        os._exit(0)
|
[
"blake.rsa@gmail.com"
] |
blake.rsa@gmail.com
|
1cab05e8df2ef56696b693b546524a113f199941
|
9923b156c529a38088c05cdd7d4b454b93603446
|
/tests/test_provider_events_monitor.py
|
751fe7545b9815bed4bdfc630af8821cd1d08bca
|
[
"Apache-2.0"
] |
permissive
|
nevermined-io/gateway-events
|
fb9f7eefb3c34f9639bc2fa54062649efc2f74b4
|
5e70ad7ee7f9964314f21b5dfc60bb241f964344
|
refs/heads/master
| 2023-03-15T08:47:58.358646
| 2020-11-06T16:16:17
| 2020-11-06T16:16:17
| 257,318,216
| 0
| 0
|
Apache-2.0
| 2021-03-26T00:37:57
| 2020-04-20T15:08:07
|
Python
|
UTF-8
|
Python
| false
| false
| 4,408
|
py
|
import time
from contracts_lib_py.web3_provider import Web3Provider
from common_utils_py.agreements.service_agreement import ServiceAgreement
from nevermined_gateway_events.provider_events_monitor import ProviderEventsMonitor
from tests.conftest import get_consumer_account
from tests.resources.keeper_helpers import (get_conditions_status, get_registered_ddo, grant_access,
lock_reward, place_order)
def test_init_events_monitor(keeper, web3, storage_path, provider_account):
events_monitor = ProviderEventsMonitor(keeper, web3, storage_path, provider_account)
assert events_monitor.last_n_blocks == events_monitor.LAST_N_BLOCKS
assert (Web3Provider.get_web3().eth.blockNumber - events_monitor.latest_block) < 5
assert events_monitor.last_processed_block == 0
def test_process_pending_agreements(keeper, web3, storage_path, provider_account):
start_time = 0
ddo = get_registered_ddo(provider_account, providers=[provider_account.address])
did = ddo.did
consumer = get_consumer_account()
block_number = web3.eth.blockNumber
block_number = block_number - 10 if block_number > 10 else block_number
metadata = ddo.metadata
encrypted_files = metadata['encryptedFiles']
sa = ServiceAgreement.from_ddo('access', ddo)
agr_1 = place_order(provider_account, ddo, consumer)
agr_2 = place_order(provider_account, ddo, consumer)
agr_3 = place_order(provider_account, ddo, consumer)
pending_agreements = {
agr_1: [
did, 3, sa.get_price(), encrypted_files, start_time,
consumer.address, block_number, 'access'
],
agr_2: [
did, 3, sa.get_price(), encrypted_files, start_time + 3000,
consumer.address, block_number, 'access'
],
agr_3: [
did, 3, sa.get_price(), encrypted_files, start_time + 10000,
consumer.address, block_number, 'access'
]
}
conditions = {
agr_1: {'accessSecretStore': 1, 'lockReward': 2, 'escrowReward': 1},
agr_2: {'accessSecretStore': 1, 'lockReward': 1, 'escrowReward': 1},
agr_3: {'accessSecretStore': 2, 'lockReward': 2, 'escrowReward': 1}
}
balance = keeper.token.get_token_balance(consumer.address) / (2 ** 18)
if balance < 20:
keeper.dispenser.request_tokens(100, consumer)
lock_reward(agr_1, sa, consumer)
lock_reward(agr_3, sa, consumer)
grant_access(agr_3, ddo, consumer, provider_account)
event = keeper.access_secret_store_condition.subscribe_condition_fulfilled(
agr_3, 35, None, (), wait=True
)
if not event:
# check status
cond_to_status = get_conditions_status(agr_3)
print(f'agr_3 condition status: {cond_to_status}')
if cond_to_status['accessSecretStore'] != 2:
raise AssertionError(f'grant access failed for agreement {agr_3}')
events_monitor = ProviderEventsMonitor(keeper, web3, storage_path, provider_account)
events_monitor.process_pending_agreements(pending_agreements, conditions)
keeper.access_secret_store_condition.subscribe_condition_fulfilled(
agr_1, 15, None, (), wait=True
)
keeper.escrow_reward_condition.subscribe_condition_fulfilled(
agr_1, 15, None, (), wait=True
)
keeper.escrow_reward_condition.subscribe_condition_fulfilled(
agr_3, 15, None, (), wait=True
)
# check status of all agreements
for agr_id in (agr_1, agr_3):
cond_to_status = get_conditions_status(agr_id)
assert [2, 2, 2] == list(cond_to_status.values()), \
f'agr_id {agr_id}: some conditions were not fulfilled or ' \
f'do not match the expected status. Conditions status are: {cond_to_status}'
events_monitor.start_agreement_events_monitor()
lock_reward(agr_2, sa, consumer)
keeper.access_secret_store_condition.subscribe_condition_fulfilled(
agr_2, 15, None, (), wait=True
)
keeper.escrow_reward_condition.subscribe_condition_fulfilled(
agr_2, 15, None, (), wait=True
)
cond_to_status = get_conditions_status(agr_2)
assert [2, 2, 2] == list(cond_to_status.values()), \
f'agr_id {agr_id}: some conditions were not fulfilled or ' \
f'do not match the expected status. Conditions status are: {cond_to_status}'
events_monitor.stop_monitor()
time.sleep(2)
|
[
"enrique@keyko.io"
] |
enrique@keyko.io
|
a2a9fceea4e11f10be4c0e8af2593734c4b133ee
|
5095200e9ca55cd3a37af34ed44448c02e2a1bb5
|
/paddlehub/compat/task/config.py
|
f464cce162899582ffaf7ac57e1505e14f045507
|
[
"Apache-2.0"
] |
permissive
|
PaddlePaddle/PaddleHub
|
8712603ef486c45e83eb0bc5725b0b3ed3ddbbde
|
b402610a6f0b382a978e82473b541ea1fc6cf09a
|
refs/heads/develop
| 2023-07-24T06:03:13.172978
| 2023-03-28T11:49:55
| 2023-03-28T11:49:55
| 162,672,577
| 12,914
| 2,239
|
Apache-2.0
| 2023-07-06T21:38:19
| 2018-12-21T06:00:48
|
Python
|
UTF-8
|
Python
| false
| false
| 1,982
|
py
|
# coding:utf-8
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from typing import Callable
class RunConfig(object):
''' This class specifies the configurations for PaddleHub to finetune '''
def __init__(self,
log_interval: int = 10,
eval_interval: int = 100,
use_data_parallel: bool = True,
save_ckpt_interval: int = None,
use_cuda: bool = True,
checkpoint_dir: str = None,
num_epoch: int = 1,
batch_size: int = 32,
strategy: Callable = None):
''' Construct finetune Config '''
self.log_interval = log_interval
self.eval_interval = eval_interval
self.save_ckpt_interval = save_ckpt_interval
self.use_cuda = use_cuda
self.num_epoch = num_epoch
self.batch_size = batch_size
self.use_data_parallel = use_data_parallel
if checkpoint_dir is None:
now = int(time.time())
time_str = time.strftime('%Y%m%d%H%M%S', time.localtime(now))
self.checkpoint_dir = 'ckpt_' + time_str
else:
self.checkpoint_dir = checkpoint_dir
def __repr__(self):
return 'config with num_epoch={}, batch_size={}, use_cuda={}, checkpoint_dir={} '.format(
self.num_epoch, self.batch_size, self.use_cuda, self.checkpoint_dir)
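# Usage sketch (illustrative values only):
#     config = RunConfig(num_epoch=3, batch_size=16, use_cuda=False, checkpoint_dir='ckpt_demo')
#     print(config)  # -> config with num_epoch=3, batch_size=16, use_cuda=False, checkpoint_dir=ckpt_demo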
|
[
"wuzewu@baidu.com"
] |
wuzewu@baidu.com
|
2455f838db84ea0b765dedc72fed46711631ee90
|
2a73b2be1379b368d675d641db4254dd2a7f202b
|
/django_app/member/views/profile.py
|
2e39e04f473f03b9a7a6e4418c7fc70372dcb03a
|
[] |
no_license
|
recordingbetter/Instagram-practice
|
610dcb5417657ed80b35848d14053d4afe9651fb
|
5f0142dc468d2e74778f463d6f0ec58892afcdf6
|
refs/heads/master
| 2022-12-14T01:33:19.161868
| 2017-08-03T02:22:40
| 2017-08-03T02:22:40
| 93,719,756
| 1
| 0
| null | 2022-12-08T00:00:14
| 2017-06-08T07:26:10
|
Python
|
UTF-8
|
Python
| false
| false
| 2,647
|
py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from ..forms import UserEditForm
User = get_user_model()
__all__ = (
'profile',
'profile_edit',
)
def profile(request, user_pk=None):
if not request.user.is_authenticated and not user_pk:
login_url = reverse('member:login')
return redirect(login_url + "?next=" + request.get_full_path())
num_posts_per_page = 3
# 0. Wire this up in urls_views.py
# 1. Render the User matching user_pk under the 'cur_user' key
# 2. Build member/profile.html and show that user's information
# 2-1. Show that user's followers and following lists
# 3. Show whether the currently logged-in user is following this user (cur_user)
# 3-1. If following, show an 'unfollow' button; otherwise show a 'follow' button
# 4. -> Create a follow_toggle(request) view
# user = User.objects.get(pk=user_pk)
page = request.GET.get('page', 1)
try:
page = int(page) if int(page) > 1 else 1
except ValueError:
page = 1
except Exception as e:
page = 1
print(e)
if user_pk:
user = get_object_or_404(User, pk=user_pk)
else:
user = request.user
# posts are returned in page-sized batches based on the page number
# no need to apply this to filter() or order_by()
posts = user.post_set.order_by('-created_date')[: num_posts_per_page * page]
post_count = user.post_set.count()
# next
next_page = page + 1 if post_count > page * num_posts_per_page else page
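# Worked example (illustrative): with num_posts_per_page = 3 and page = 2, the slice above yields
# posts[:6], and next_page becomes 3 only when the user has more than 6 posts.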
context = {
'cur_user': user,
'posts': posts,
'post_count': post_count,
'page': page,
'next_page': next_page,
}
return render(request, 'member/profile.html', context)
@login_required
# @require_POST
def profile_edit(request):
'''
When request.method == 'POST', build and use a UserEditForm (a ModelForm subclass)
that can edit nickname and img_profile (a field added to the User model).
'''
user = request.user
if request.method == 'POST':
form = UserEditForm(
data=request.POST,
files=request.FILES,
instance=user
)
if form.is_valid():
form.save()
return redirect('member:my_profile')
else:
form = UserEditForm(instance=user)
context = {
'form': form
}
return render(request, 'member/profile_edit.html', context=context)
|
[
"recordingbetter@gmail.com"
] |
recordingbetter@gmail.com
|
bfdb6e1d74f2f88ec6366964c86e81f107fcfa47
|
cbade68fac51a0228aad4414c1e06bde6611e36a
|
/venv/bin/easy_install-3.8
|
9b9e302a5c00e7f53eaacc6bf7e9e725fb594d13
|
[] |
no_license
|
Roger-Aguiar/python_plus_postgresql
|
04483f47f176db766a664dcc94292d4d763ce070
|
f06113ba70014bcddfb99d8c196e429ce2820f4d
|
refs/heads/master
| 2022-11-25T18:45:45.152608
| 2020-08-01T09:51:52
| 2020-08-01T09:51:52
| 283,590,876
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 293
|
8
|
#!/home/roger/Desktop/Files/Studies/Python/Python_with_postgresql/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"roger.silvaaguiar@yahoo.com.br"
] |
roger.silvaaguiar@yahoo.com.br
|
8aa8a71d1233dc911af7a391b8b3275700a57763
|
de764e803d7975fa231e109b74446dd1d5acc033
|
/make_data/scratch.py
|
579f73337d842635c3426ca8ae4ac7865b491a5f
|
[] |
no_license
|
cjohnst5/akm_codePYTHON
|
e5a1e064de7f09b0bb0194d1be3306a5c9dda293
|
fdaa39b8a4a4588aac34ae5a52aa424e284b1b8c
|
refs/heads/master
| 2021-01-19T07:46:25.940305
| 2017-04-13T21:06:25
| 2017-04-13T21:06:25
| 87,570,784
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 343
|
py
|
from sklearn.neighbors import NearestNeighbors
import numpy as np
X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
nbrs = NearestNeighbors(n_neighbors=2, algorithm='ball_tree').fit(X)
distances, indices = nbrs.kneighbors(X)
row = np.arange(6)
column = np.array([0, 1, 1, 1, 0, 0])
# pick indices[row[i], column[i]] for each i via integer (fancy) indexing
print(indices[row, column])
|
[
"Carla Johnston"
] |
Carla Johnston
|
e7d04e8f27af5bd5ec4a2388ad5cd8b583dee39a
|
23735e41c2003c7a5033d710be75b72b69081423
|
/backend/run.py
|
96fc8e0c6b6ba66a1a79634e260d7bb1c2dd8355
|
[] |
no_license
|
mhb8436/angular-flask-seed
|
2f2088ed41b53331f741ce84b9e73e02b35ea48a
|
fa4e7880c0dfb3d0d0278de2ce213c8699470ca3
|
refs/heads/master
| 2020-12-03T10:27:06.665553
| 2014-05-12T09:36:46
| 2014-05-12T09:36:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 226
|
py
|
#!/usr/bin/env python
from server import app, db
if __name__ == "__main__":
# print app
# print app.config['SQLALCHEMY_DATABASE_URI']
app.debug = True
db.create_all(app=app)
app.run(host='0.0.0.0',port=5001, debug=True)
|
[
"mhb8436@airplug.com"
] |
mhb8436@airplug.com
|
15db74b3a5783b25bd95000b3c44f4a50e4d509d
|
f182e4b941ae17dc867ee9ef363e034235fd5823
|
/python/CoordinatesHelper.py
|
59b78bdaa40855d2dc8087a7c7c710e3911bc7bf
|
[
"MIT"
] |
permissive
|
aquenneville/siliconmilkroundabout-company-parser
|
693821ce39e053ee3fbf4e2eed8191d9bd0639eb
|
14065ba7e075f102d269833e88fb5e84e8276412
|
refs/heads/master
| 2020-03-18T05:06:56.491046
| 2018-06-15T10:14:03
| 2018-06-15T10:14:03
| 134,325,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
import urllib.request
import json
from collections import namedtuple

# Simple value type for the (longitude, latitude) pair returned by the postcodes.io API.
Coordinates = namedtuple('Coordinates', ['longitude', 'latitude'])

class PostcodeToCoordinateHelper:
    @staticmethod
    def convert(postcode):
        res = urllib.request.urlopen("http://api.postcodes.io/postcodes/" + postcode).read()
        data = json.loads(res)
        return Coordinates(data["result"]["longitude"], data["result"]["latitude"])

    @staticmethod
    def createGoogleMapsLink(x, y):
        # https://developers.google.com/maps/documentation/urls/guide
        return ("https://www.google.com/maps/dir/?api=1&origin=" + str(x.latitude) + "," + str(x.longitude)
                + "&destination=" + str(y.latitude) + "," + str(y.longitude))
|
[
"alainquenneville@gmail.com"
] |
alainquenneville@gmail.com
|
578c5fc2c8be63b7f3953a5c391eaeb6c36f8f12
|
5ba0f5e77c3cc6782683387386490ce359db0ed8
|
/bin/f2py3
|
3b295d185c6d312ddbdcef2aaddc0d028625c4af
|
[] |
no_license
|
0aksinmarathon/blog_api_jwt
|
e273c9865070a4614b8e8d971ca06d58005ed3be
|
b420abd4f503872ca8e6941d0631ed25d7d6b7ab
|
refs/heads/master
| 2023-02-13T09:05:39.719367
| 2021-01-10T09:18:40
| 2021-01-10T09:18:40
| 328,342,894
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 233
|
#!/Users/manako/blog_api/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from numpy.f2py.f2py2e import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"shintaro.manako@gmail.com"
] |
shintaro.manako@gmail.com
|
|
c4b7118b2880818c79cc6b237a64b43d935b9dfe
|
0b842bcb3bf20e1ce628d39bf7e11abd7699baf9
|
/oscar/a/api/yang/modules/tech/content/qwilt_tech_content_line/tech/content/line/analyzer/potential/potential_maapi_base_gen.py
|
7cd9cd20b07f97439aaca55200d85426178ed85f
|
[] |
no_license
|
afeset/miner2-tools
|
75cc8cdee06222e0d81e39a34f621399e1ceadee
|
81bcc74fe7c0ca036ec483f634d7be0bab19a6d0
|
refs/heads/master
| 2016-09-05T12:50:58.228698
| 2013-08-27T21:09:56
| 2013-08-27T21:09:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,492
|
py
|
# Copyright Qwilt, 2012
#
# The code contained in this file may not be used by any other entities without explicit written permission from Qwilt.
#
# Author: naamas
from a.infra.basic.return_codes import ReturnCodes
class PotentialMaapiBase(object):
def __init__ (self, logger):
raise NotImplementedError()
def init (self, domain):
raise NotImplementedError()
def requestConfigAndOper (self):
raise NotImplementedError()
def clearAllSet (self):
raise NotImplementedError()
def write (self
, line
, trxContext=None
):
raise NotImplementedError()
def read (self
, line
, trxContext=None):
raise NotImplementedError()
def readAllOrFail (self
, line
, trxContext=None):
raise NotImplementedError()
# descendants
# methodList
def newMethodList (self):
raise NotImplementedError()
def setMethodListObj (self, obj):
raise NotImplementedError()
def getMethodListObj (self):
raise NotImplementedError()
def hasMethodList (self):
raise NotImplementedError()
"""
Extracted from the below data:
{
"node": {
"name": "potential",
"namespace": "potential",
"className": "PotentialMaapi",
"importStatement": "from a.api.yang.modules.tech.content.qwilt_tech_content_line.tech.content.line.analyzer.potential.potential_maapi_gen import PotentialMaapi",
"baseClassName": "PotentialMaapiBase",
"baseModule": "potential_maapi_base_gen"
},
"ancestors": [
{
"moduleYangNamespacePrefix": "qt",
"yangName": "tech",
"namespace": "tech",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech",
"name": "tech"
},
{
"moduleYangNamespacePrefix": "qtc",
"yangName": "content",
"namespace": "content",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-content",
"name": "content"
},
{
"moduleYangNamespacePrefix": "qtc-line",
"isCurrent": false,
"yangName": "line",
"namespace": "line",
"isList": true,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-content-line",
"keyLeaf": {
"varName": "line",
"defaultVal": null,
"typeHandler": "handler: StringHandler"
},
"name": "line"
},
{
"moduleYangNamespacePrefix": "qtc-line",
"yangName": "analyzer",
"namespace": "analyzer",
"isCurrent": false,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-content-line",
"name": "analyzer"
},
{
"moduleYangNamespacePrefix": "qtc-line",
"yangName": "potential",
"namespace": "potential",
"isCurrent": true,
"isList": false,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-content-line",
"name": "potential"
}
],
"descendants": [
{
"moduleYangNamespacePrefix": "qtc-line",
"memberName": "methodList",
"yangName": "method",
"className": "BlinkyMethodMaapiList",
"importStatement": "from a.api.yang.modules.tech.content.qwilt_tech_content_line.tech.content.line.analyzer.potential.method.method_maapi_list_gen import BlinkyMethodMaapiList",
"isList": true,
"moduleYangNamespace": "http://qwilt.com/ns/yang/device/tech/qwilt-tech-content-line"
}
],
"conditionalDebugName": null,
"operLeaves": [],
"module": {},
"configLeaves": [],
"env": {
"namespaces": [
"a",
"api",
"yang",
"modules",
"tech",
"content",
"qwilt_tech_content_line"
]
},
"leaves": [],
"createTime": "2013"
}
"""
|
[
"afeset@gmail.com"
] |
afeset@gmail.com
|
c860740882ec34e2bcb0dbcf0500ca6422ae34cc
|
33d091a4b5ce3f956c3347d31413588e0e23e461
|
/client/views/views_1.py
|
7f31353bda763514867cc2207e25dfc874a44de9
|
[] |
no_license
|
tejaswinipatne/tech_dev
|
90d700096801aaf1e49f31aa33c61e925c055410
|
bfd7c24e6f53a055af011f39317e84742867dd13
|
refs/heads/master
| 2020-12-02T05:22:34.636220
| 2019-12-30T11:42:08
| 2019-12-30T11:42:08
| 230,903,661
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 74,864
|
py
|
import ast
import json
from user.models import user
from zipfile import *
from django.conf import settings
from django.contrib import messages
from django.core import serializers
from django.core.mail import send_mail
from django.http import HttpResponse, JsonResponse
from django.shortcuts import (get_object_or_404, redirect, render,
render_to_response)
from django.template import loader
from django.urls import resolve, reverse
from django.template.loader import render_to_string
from django.urls import resolve
from django.utils.html import strip_tags
from client.models import ApiLinks
from campaign.forms import Script_Form
from campaign.models import *
from campaign.choices import *
from client.decorators import *
from client.models import *
from client.utils import RegisterNotification, saving_assets, update_assets,get_external_vendors,grand_child_access_call,email_domain_check, percentage, noti_via_mail
from leads.models import *
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from setupdata.models import *
from vendors.views.views import *
from superadmin.utils import collect_campaign_details
from .chat_view import get_vendor_list_of_campaign
from superadmin.models import *
from django.template import RequestContext
from client.views.views import *
@login_required
def vendor_list(request):
''' Return vendor list '''
list = []
vendor_list_details = []
user_id = request.session['userid']
apilinks = ApiLinks.objects.all()
vendor_list = external_vendor.objects.filter()
for row in vendor_list:
list = ast.literal_eval(row.client_id)
if user_id in list:
user_details = user.objects.get(id=row.user_id)
vendor_list_details.append(
{'email': user_details.email, 'user_name': user_details.user_name})
return render(request, 'vendor1/add_vendorlist.html', {'vendor_list': vendor_list_details, 'apilinks': apilinks})
@login_required
def campaingdesc(request, camp_id):
""" campaign desciption includes vendors working on campaign """
camp = Campaign.objects.get(id=camp_id)
vendoralloc = campaign_allocation.objects.filter(campaign_id=camp_id)
return render(request, 'campaign/campdescription.html', {'camp': camp, 'vendoralloc': vendoralloc})
@login_required
def feedback(request):
""" feedback from client for vendor """
# print(request.POST)
vendor_rating = request.POST.get('rating')
# vendor_rating = GenericRelation(Rating, related_query_name='foos')
feedback = request.POST.get('feed')
if feedback:
feed_back = feedback_details.objects.create(feedback=feedback)
feed_back.save()
else:
print("feedback is empty")
return render(request, 'campaign/feedback.html', {})
@login_required
def leadlist(request, camp_id, status):
""" return single vendor leads to the client """
list1 = []
all_header = []
all_lead_header = []
batchlist = []
count = []
camp_alloc = campaign_allocation.objects.filter(id=camp_id)
global_rejected_reason = leads_rejected_reson.objects.filter(
status=0, is_active=1)
client_rejected_reason = leads_rejected_reson.objects.filter(
status=1, is_active=1, user_id=request.session['userid'])
global_rectify_reason = Leads_Rectify_Reason.objects.filter(
status=0, is_active=1)
client_rectify_reason = Leads_Rectify_Reason.objects.filter(
status=1, is_active=1, user_id=request.session['userid'])
if camp_alloc.count() == 1 and status == 1:
camp_alloc = campaign_allocation.objects.get(id=camp_id)
header = create_custom_header(
camp_alloc.campaign_id, request.session['userid']) #fav lead
data = {'campaign_id': camp_alloc.campaign_id, 'camp_status': status, 'camp_id': camp_id, 'status': status, 'camp_name': camp_alloc.campaign.name,
'cpl': camp_alloc.cpl, 'lead': camp_alloc.volume, 'submited_lead': camp_alloc.submited_lead, 'return_lead': camp_alloc.return_lead, 'vendor_id': camp_alloc.client_vendor_id, 'user_name': user.objects.filter(id=camp_alloc.client_vendor_id)[0].user_name}
if camp_alloc.upload_leads != None:
list1 = ast.literal_eval(camp_alloc.upload_leads)
batchlist = batch_list(list1)
if len(header) == 0:
for dict in list1:
count=0
if len(dict.keys()) > count :
count = len(dict.keys())
all_header,all_lead_header=[],[]
for key in dict:
all_header.append(key)
all_lead_header.append(key)
all_header = create_header(all_header)
labels = get_lead_header(camp_alloc.campaign_id)
labels +=join_custom_question_header(camp_alloc.campaign_id)
if batchlist != []:
count = batchlist[1]
batchlist = batchlist[0]
if len(all_header) == 0:
all_header = labels
export_file_name=export_data(camp_alloc.campaign.name,camp_alloc.campaign_id,request.session['userid'],1,camp_alloc.client_vendor.password) # download zipfile and save
return render(request, 'campaign/client_leadlist.html', {'export_file_name':export_file_name,'approve_leads':check_approve_lead(list1),'client_rectify_reason': client_rectify_reason,
'global_rectify_reason': global_rectify_reason, 'client_rejected_reason': client_rejected_reason, 'global_rejected_reason': global_rejected_reason, 'campaigns': data, 'leadlist': list1,
'all_lead_header': all_lead_header, 'all_header': all_header, 'header': header, 'status': status,'batchlist':batchlist,'count':count})
elif camp_alloc.count() == 1 and status == 4:
camp_alloc = campaign_allocation.objects.get(id=camp_id)
header = create_custom_header(
camp_alloc.campaign_id, request.session['userid'])
data = {'campaign_id': camp_alloc.campaign_id, 'camp_status': status, 'camp_id': camp_alloc.campaign_id, 'camp_alloc_id': camp_id, 'camp_name': camp_alloc.campaign.name,
'cpl': camp_alloc.old_cpl, 'lead': camp_alloc.old_volume, 'submited_lead': camp_alloc.submited_lead, 'return_lead': camp_alloc.return_lead}
if camp_alloc.upload_leads != None:
list1 = ast.literal_eval(camp_alloc.upload_leads)
if len(header) == 0:
for dict in list1:
count=0
if len(dict.keys()) > count :
count = len(dict.keys())
all_header,all_lead_header=[],[]
for key in dict:
all_header.append(key)
all_lead_header.append(key)
all_header = create_header(all_header)
labels = get_lead_header(camp_alloc.campaign_id)
labels +=join_custom_question_header(camp_alloc.campaign_id)
if len(all_header) == 0:
all_header = labels
export_file_name=export_data(camp_alloc.campaign.name,camp_alloc.campaign_id,request.session['userid'],4,camp_alloc.client_vendor.password)
return render(request, 'campaign/client_leadlist.html', {'export_file_name':export_file_name,'approve_leads':check_approve_lead(list1),'campaigns': data, 'leadlist': list1, 'batchlist':batchlist,'count':count,'all_lead_header': all_lead_header, 'all_header': all_header, 'header': header, 'status': status})
return render(request, 'campaign/client_leadlist.html', {'camp_id': camp_id, 'status': status,'approve_leads':0,})
def client_lead_list(request, camp_id, status):
""" leadlist display when client upload leads"""
leadlist = []
all_header = []
all_lead_header = []
list=[]
# import pdb;pdb.set_trace()
if campaign_allocation.objects.filter(campaign_id=camp_id, status=status, client_vendor_id=request.session['userid']).count() == 1:
camp_id = campaign_allocation.objects.get(
campaign_id=camp_id, status=status, client_vendor_id=request.session['userid'])
camp_id = camp_id.id
camp_alloc = campaign_allocation.objects.filter(id=camp_id)
if camp_alloc.count() == 1:
camp_alloc = campaign_allocation.objects.get(id=camp_id)
header = create_custom_header(
camp_alloc.campaign_id, request.session['userid'])
data = {'camp_id': camp_alloc.campaign_id, 'camp_alloc_id': camp_id, 'camp_name': camp_alloc.campaign.name, 'cpl': camp_alloc.campaign.cpl,
'lead': camp_alloc.campaign.target_quantity, 'submited_lead': camp_alloc.submited_lead, 'client_name':camp_alloc.campaign.user.user_name,'return_lead': camp_alloc.return_lead}
if camp_alloc.upload_leads != None:
list = ast.literal_eval(camp_alloc.upload_leads)
if len(header) == 0:
count=0
for dict in list:
if len(dict.keys()) > count :
count = len(dict.keys())
all_header,all_lead_header=[],[]
for key in dict:
all_header.append(key)
all_lead_header.append(key)
all_header = create_header(all_header)
labels=get_lead_header(camp_alloc.campaign_id)
labels +=join_custom_question_header(camp_alloc.campaign_id)
if len(all_header) == 0:
all_header=labels
return render(request, 'campaign/client_lead_upload.html', {'approve_leads': check_approve_lead(list), 'campaigns': data, 'leadlist': list, 'all_lead_header': all_lead_header, 'all_header': all_header, 'header': header, 'status': camp_alloc.status, 'camp_id': camp_id})
else:
if campaign_allocation.objects.create(campaign_id=camp_id,volume=0,cpl=0,status=status, client_vendor_id=request.session['userid']):
camp_id = campaign_allocation.objects.get(
campaign_id=camp_id, status=status, client_vendor_id=request.session['userid'])
camp_id = camp_id.id
camp_alloc = campaign_allocation.objects.filter(id=camp_id)
if camp_alloc.count() == 1:
camp_alloc = campaign_allocation.objects.get(id=camp_id)
header = create_custom_header(
camp_alloc.campaign_id, request.session['userid'])
data = {'camp_id': camp_alloc.campaign_id, 'camp_alloc_id': camp_id, 'camp_name': camp_alloc.campaign.name, 'cpl': camp_alloc.campaign.cpl,
'lead': camp_alloc.campaign.target_quantity, 'submited_lead': camp_alloc.submited_lead,'client_name':camp_alloc.campaign.user.user_name, 'return_lead': camp_alloc.return_lead}
if camp_alloc.upload_leads != None:
list = ast.literal_eval(camp_alloc.upload_leads)
if len(header) == 0:
count=0
for dict in list:
if len(dict.keys()) > count :
count = len(dict.keys())
all_header,all_lead_header=[],[]
for key in dict:
all_header.append(key)
all_lead_header.append(key)
all_header = create_header(all_header)
labels=get_lead_header(camp_alloc.campaign_id)
labels +=join_custom_question_header(camp_alloc.campaign_id)
if len(all_header) == 0:
all_header=labels
# return render(request, 'campaign/client_lead_upload.html', {'camp_id': camp_id, 'status': status})
return render(request, 'campaign/client_lead_upload.html', {'approve_leads': check_approve_lead(list), 'campaigns': data, 'leadlist': list, 'all_lead_header': all_lead_header, 'all_header': all_header, 'header': header, 'status': camp_alloc.status, 'camp_id': camp_id})
@login_required
@is_client
def lead_error_list(request,camp_id,camp_alloc_id):
import datetime
userid=request.session['userid']
date=datetime.datetime.today().strftime('%Y-%m-%d')
lead_data={'success':1}
if Lead_Uploaded_Error.objects.filter(campaign_id=camp_id,user_id=userid,created_date=date,lead_upload_status=1).exists():
lead_data=Lead_Uploaded_Error.objects.filter(campaign_id=camp_id,user_id=userid,created_date=date,lead_upload_status=1).latest('exact_time')
"""
store all leads in leads
"""
leads=[]
if lead_data.all_lead_count == 0:
if lead_data.uploaded_lead_count > 0:
leads=ast.literal_eval(lead_data.uploaded_lead)
if lead_data.remove_duplicate_lead_csv_cnt > 0:
leads=leads+ast.literal_eval(lead_data.remove_duplicate_lead_csv)
if lead_data.remove_lead_header_cnt > 0:
leads=leads+ast.literal_eval(lead_data.remove_lead_header)
if lead_data.duplicate_with_vendor_cnt > 0:
leads=leads+ast.literal_eval(lead_data.duplicate_with_vendor)
if lead_data.duplicate_with_our_cnt > 0:
leads=leads+ast.literal_eval(lead_data.duplicate_with_our)
lead_data.all_lead=leads
lead_data.all_lead_count=len(leads)
lead_data.save()
else:
leads=ast.literal_eval(lead_data.all_lead)
list = []
all_header = []
all_lead_header = []
labels=get_lead_header(camp_id)
labels +=join_custom_question_header(camp_id)
if len(leads) > 0:
for dict in leads[0]:
all_header.append(dict)
all_lead_header.append(dict)
return render(request, 'campaign/lead_error_list.html', {'edit_lead_header':labels,'leadlist': leads, 'all_lead_header': all_lead_header, 'all_header': all_header,'camp_id': camp_id,'camp_alloc_id':camp_alloc_id})
def upload_with_rejected_lead_web(request,camp_id,camp_alloc_id):
from datetime import datetime
userid=request.session['userid']
date=datetime.today().strftime('%Y-%m-%d')
if Lead_Uploaded_Error.objects.filter(campaign_id=camp_id,user_id=userid,created_date=date,lead_upload_status=1).exists():
lead_data=Lead_Uploaded_Error.objects.filter(campaign_id=camp_id,user_id=userid,created_date=date,lead_upload_status=1).latest('exact_time')
leads=[]
"""
store all leads in leads
"""
leads=ast.literal_eval(lead_data.all_lead)
lead_data.lead_upload_status=0
lead_data.status=0
lead_data.save()
last_id_lead=get_last_id_of_lead(camp_alloc_id)
last_batch_id = get_last_batch_id(camp_alloc_id)
if len(leads)>0:
for lead in leads:
last_id_lead +=1
lead['id']=last_id_lead
lead['batch']=last_batch_id
lead_data=upload_lead_database(leads,camp_alloc_id,userid,1,last_batch_id)
camp_detail=campaign_allocation.objects.get(id=camp_alloc_id)
return redirect('client_lead_list', camp_id=camp_id,status=camp_detail.status)
@login_required
@is_client
def upload_without_rejected_lead_web(request,camp_id,camp_alloc_id):
from datetime import datetime
userid=request.session['userid']
date=datetime.today().strftime('%Y-%m-%d')
lead_data={'success':1}
if Lead_Uploaded_Error.objects.filter(campaign_id=camp_id,user_id=userid,created_date=date,lead_upload_status=1).exists():
lead_data=Lead_Uploaded_Error.objects.filter(campaign_id=camp_id,user_id=userid,created_date=date,lead_upload_status=1).latest('exact_time')
real_data,leads=[],[]
"""
store all leads in leads
"""
leads=ast.literal_eval(lead_data.all_lead)
lead_data.lead_upload_status=0
lead_data.status=0
lead_data.save()
for lead in leads:
print(leads)
if lead['TC_lead_status'] == 'valid lead':
real_data.append(lead)
last_id_lead=get_last_id_of_lead(camp_alloc_id)
last_batch_id = get_last_batch_id(camp_alloc_id)
if len(real_data)>0:
for lead in real_data:
last_id_lead +=1
lead['id']=last_id_lead
lead['batch']=last_batch_id
lead_data=upload_lead_database(real_data,camp_alloc_id,userid,1,last_batch_id)
camp_detail=campaign_allocation.objects.get(id=camp_alloc_id)
return redirect('client_lead_list', camp_id=camp_id,status=camp_detail.status)
#upload excel lead data into database
def upload_lead_database(dict, camp_alloc_id, userid,is_upload,last_batch_id):
""" Upload excel lead data into database """
upload_lead_cnt=len(dict)
data = campaign_allocation.objects.get(id=camp_alloc_id, client_vendor_id=userid)
"""
IF user select upload with reject lead or with rejected lead
Only that time upload data
"""
if is_upload == 1:
if data.submited_lead == 0:
data.upload_leads = dict
data.submited_lead = len(dict)
data.batch_count = last_batch_id
data.save()
else:
old_data = ast.literal_eval(data.upload_leads)
data.upload_leads = old_data + dict
data.submited_lead = len(old_data + dict)
data.batch_count = last_batch_id
data.save()
if upload_lead_cnt > 0:
return {'success':1,'upload_lead_cnt':upload_lead_cnt}
return {'success':1,'upload_lead_cnt':0}
#get last id of lead data campaign
def get_last_id_of_lead(camp_alloc_id):
""" Get last id of lead """
camp_desc=campaign_allocation.objects.get(id=camp_alloc_id)
if camp_desc.upload_leads:
lead_desc=ast.literal_eval(camp_desc.upload_leads)
if len(lead_desc) > 0:
return lead_desc[-1]['id']
return 0
else:
return 1
def get_last_batch_id(camp_alloc_id):
'''get batch id for upload'''
camp_desc=campaign_allocation.objects.get(id=camp_alloc_id)
if camp_desc.batch_count > 0:
return camp_desc.batch_count + 1
return 1
# check approvr leads
def check_approve_lead(list):
""" approve leads """
approve = 0
for dict in list:
if dict['status'] == 1:
approve += 1
return approve
def get_lead_header(camp_id):
""" returs leads orginal headers """
camp_lead=Delivery.objects.get(campaign_id=camp_id)
if camp_lead.custom_header_status == 0 :
labels = camp_lead.data_header
labels = list(labels.split(','))
else:
labels = camp_lead.custom_header
labels = labels.split(',')
return labels
#lead data export into excel
def export_data(camp_name,camp_id,vendor_id,status,password):
import unicodecsv as csv
import os
import shutil
from os import path
from shutil import make_archive
from openpyxl import Workbook
toCSV = []
toCSV = convert_data_list(camp_id, vendor_id, status)
if len(toCSV) > 0:
if type(toCSV[0]) is dict:
keys = toCSV[0].keys()
with open('export_approve_leads.csv', 'wb') as output_file:
dict_writer = csv.DictWriter(output_file, keys)
dict_writer.writeheader()
dict_writer.writerows(toCSV)
with open('export_approve_leads.csv', 'rb') as myfile:
response = HttpResponse(myfile, content_type='text/csv')
response['Content-Disposition'] = 'attachment; filename=export_approve_leads.csv'
src = path.realpath("export_approve_leads.csv")
root_dir, tail = path.split(src)
file_name='/static/'+str(camp_name)+'_'+str(camp_id)+'_'+str(vendor_id)+'.zip'
import pyminizip
compression_level = 5 # 1-9
pyminizip.compress(root_dir+'/export_approve_leads.csv',None,root_dir+file_name, password, compression_level)
return file_name
# convert lead data as per header lead
def convert_data_list(camp_id, vendor_id, status):
""" convert lead data as per header lead """
vendor_lead_list = []
data = []
lead_dict = {}
labels = get_lead_header(camp_id)
labels +=join_custom_question_header(camp_id)
# status=1 means all vendors leads and 2 means particoluar vendor leads
if status == 1:
camp_lead_data = campaign_allocation.objects.filter(
campaign_id=camp_id, status=1)
for row in camp_lead_data:
if row.submited_lead > 0:
vendor_lead_list += ast.literal_eval(row.upload_leads)
if len(vendor_lead_list) > 0:
for dict in vendor_lead_list:
if dict['status'] >=0 :
for key in dict:
if key in labels:
lead_dict.update({key: dict[key]})
data.append(lead_dict)
lead_dict = {}
return data
else:
print("hiielse")
return labels
else:
camp_lead_data = campaign_allocation.objects.filter(
campaign_id=camp_id, status=1, client_vendor_id=vendor_id)
for row in camp_lead_data:
if row.submited_lead > 0:
vendor_lead_list += ast.literal_eval(row.upload_leads)
if len(vendor_lead_list) > 0:
for dict in vendor_lead_list:
if dict['status'] == 1:
for key in dict:
if key in labels:
lead_dict.update({key: dict[key]})
data.append(lead_dict)
lead_dict = {}
return data
else:
return labels
def create_header(dict):
""" creates header list """
list = []
for row in dict:
if row == 'reason':
list.append('Status Code')
else:
list.append(row)
return list
def create_custom_header(camp_id, userid):
""" create custome header list """
fav_lead = []
fav_lead_details = favorite_leads_header.objects.filter(
campaign_id=camp_id, user_id=userid)
if fav_lead_details.count() == 1:
fav_lead_details = favorite_leads_header.objects.get(
campaign_id=camp_id, user_id=userid)
return ast.literal_eval(fav_lead_details.header)
else:
return fav_lead
def lead_approve(request):
""" approve selected lead """
return_lead = 0
results = [int(i) for i in request.POST.getlist('id[]')]
status = request.POST.get('status')
camp_id = request.POST.get('camp_id')
camp_alloc = campaign_allocation.objects.get(
id=camp_id, status=status)
list = ast.literal_eval(camp_alloc.upload_leads)
for dict in list:
if dict['id'] in results:
dict['status'] = 1
elif dict['status'] == 2:
return_lead += 1
camp_alloc.upload_leads = list
camp_alloc.return_lead = return_lead
camp_alloc.approve_leads = check_approve_lead(list)
camp_alloc.save()
action_on_camp_by_client(1, results, camp_alloc.campaign.id, camp_alloc.client_vendor.id, camp_alloc.client_vendor.user_name)
data = {'success': 1, 'msg': 'Leads Approved Successfully!..'}
return JsonResponse(data)
def lead_rejected(request):
""" reject selected lead """
return_lead = 0
results = [int(i) for i in request.POST.getlist('id[]')]
status = request.POST.get('status')
camp_id = request.POST.get('camp_id')
Reason = request.POST.get('Reason')
lead_desc = request.POST.get('lead_desc')
camp_alloc = campaign_allocation.objects.get(
id=camp_id)
list = ast.literal_eval(camp_alloc.upload_leads)
for dict in list:
if dict['id'] in results:
dict['status'] = int(request.POST.get('lead_status'))
dict['reason'] = Reason
dict['lead_desc'] = lead_desc
if dict['status'] == 2:
return_lead += 1
camp_alloc.upload_leads = list
camp_alloc.return_lead = return_lead
camp_alloc.save()
action_on_camp_by_client(2, results, camp_alloc.campaign.id, camp_alloc.client_vendor.id, camp_alloc.client_vendor.user_name, Reason, lead_desc)
data = {'success': 1, 'msg': 'Leads Rejected Successfully!..'}
# send notification when rectify lead
title = 'Lead Reject'
desc = str(len(results))+' More leads Rejected on ' + \
str(camp_alloc.campaign.name)
from client.utils import noti_via_mail
noti_via_mail(camp_alloc.client_vendor_id, title, desc, mail_noti_client_action_on_leads)
RegisterNotification(
request.session['userid'], camp_alloc.client_vendor_id, desc, title, 1, None, camp_alloc)
return JsonResponse(data)
def lead_rectify(request):
""" rectify selected leads """
results = [int(i) for i in request.POST.getlist('id[]')]
status = request.POST.get('status')
camp_id = request.POST.get('camp_id')
Reason = request.POST.get('Reason')
lead_desc = request.POST.get('lead_desc')
camp_alloc = campaign_allocation.objects.get(
id=camp_id)
list = ast.literal_eval(camp_alloc.upload_leads)
for dict in list:
if dict['id'] in results:
dict['status'] = 3
dict['reason'] = Reason
dict['lead_desc'] = lead_desc
batch = dict['batch']
camp_alloc.upload_leads = list
camp_alloc.save()
action_on_camp_by_client(3, results, camp_alloc.campaign.id, camp_alloc.client_vendor.id, camp_alloc.client_vendor.user_name, Reason, lead_desc)
data = {'success': 1, 'msg': 'Leads Rectify Successfully!..'}
# send notification when rectify lead
title = 'Lead Rectify'
desc = str(len(results))+' More leads Rectify on ' + \
    str(camp_alloc.campaign.name)
from client.utils import noti_via_mail
noti_via_mail(camp_alloc.client_vendor_id, title, desc, mail_noti_client_action_on_leads)
RegisterNotification(
request.session['userid'], camp_alloc.client_vendor_id, desc, title, 1, None, camp_alloc)
return JsonResponse(data)
@login_required
def campaign_vendor_list(request, camp_id):
""" campaign vendor list """
users = user.objects.filter(usertype_id=2)
clients = match_campaign_vendor.objects.filter(client_vendor_id__in=users, campaign_id=camp_id)
return render(request, 'vendor1/vendorlist.html', {'clients': clients, 'camp': camp_id})
def Suggest(request):
""" suggest vendor list """
results = [int(i) for i in request.POST.getlist('id[]')]
data = {'success': 0}
for id in results:
campaign_id = request.POST.get('camp_id', None)
vendor_id = id
counter = campaign_allocation.objects.filter(
campaign_id=campaign_id, client_vendor_id=vendor_id, status=0, suggest_status=1).count()
if counter == 1:
t = campaign_allocation.objects.get(
campaign_id=campaign_id, client_vendor_id=vendor_id)
t.status = 0
t.suggest_status = 1
t.save()
data = {'success': 1, 'msg': "Vendor Suggest Successfully!.."}
else:
campaign_allocation.objects.create(
status=0, client_vendor_id=vendor_id, campaign_id=campaign_id, suggest_status=1)
data = {'success': 1, 'msg': "Vendor Suggest Successfully!.."}
return JsonResponse(data)
def create_demo_campaign(request):
return render(request, 'campaign/create_demo_campaign.html', {})
def password_check(passwd):
SpecialSym =['$', '@', '#', '%', '^', '&', '*', '+', '-', '!']
if len(passwd) < 8:
return False
if not any(char.isdigit() for char in passwd):
return False
if not any(char.isupper() for char in passwd):
return False
if not any(char.islower() for char in passwd):
return False
if not any(char in SpecialSym for char in passwd):
return False
else:
return True
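# Example behaviour (illustrative): password_check('Abcdef1!') returns True;
# password_check('abcdefgh') returns False (no digit, uppercase letter, or special symbol).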
def add_venodr(request):
""" add vendors """
status = 0
email = str(request.POST.get('email')).lower()
username = str(request.POST.get('username'))
pwd = request.POST.get('pwd')
user_id = request.session['userid']
site_url = settings.BASE_URL
email_check = email_domain_check(email)
client = user.objects.get(id=user_id).user_name
t = password_check(pwd)
if t is True:
if email_check['status'] == 1:
if user.objects.filter(email=email).count() == 1:
user_details = user.objects.get(email=email)
data = add_external_vendor(user_details.id, user_id)
else:
data = add_external_user_vendor(user_id, email, pwd, username)
subject = "Invitation From Techconnetr"
html_message = render_to_string('email_templates/external_user_register_template.html', {
'username': email,'client': client,'site_url': site_url, 'password': pwd})
plain_message = strip_tags(html_message)
from_email = settings.EMAIL_HOST_USER
to_list = [email]
if send_mail(subject, plain_message, from_email, to_list, fail_silently=True,html_message=html_message):
print('sent mail')
else:
data = email_check
print(data)
return JsonResponse(data)
else:
data = {'status':0,'msg':"Password should contain characters, numbers and special symbols!"}
return JsonResponse(data)
def add_external_vendor(user_id, client_id):
""" add TC vendor by superadmin """
list = []
if external_vendor.objects.count() > 0:
if external_vendor.objects.filter(user_id=user_id).count() == 1:
client_vendor_data = external_vendor.objects.get(user_id=user_id)
list = ast.literal_eval(client_vendor_data.client_id)
if client_id in list:
return {'status': 0, 'msg': 'you already added this vendor', 'user_id': user_id}
else:
list.append(client_id)
client_vendor_data.client_id = list
client_vendor_data.save()
return {'status': 2, 'msg': 'This vendor already added by another vendor', 'user_id': user_id}
else:
list.append(client_id)
external_vendor.objects.create(client_id=list, user_id=user_id)
return {'status': 1, 'msg': ' vendor added Successfully!...', 'user_id': user_id}
else:
list.append(client_id)
external_vendor.objects.create(client_id=list, user_id=user_id)
return {'status': 1, 'msg': ' vendor added Successfully!...', 'user_id': user_id}
def add_external_user_vendor(client_id, email, pwd, username):
""" add vendor by client """
user.objects.create(email=email, user_name=username,password=pwd, is_active=1, usertype_id=5)
user_id = user.objects.latest('id')
return add_external_vendor(user_id.id, client_id)
@login_required
def vendor_allocation(request, camp_id):
""" vendor allocation by to client to his external vendor """
campaign_details = Campaign.objects.get(id=camp_id)
list = []
vendor_list_details = []
username = campaign_details.user.user_name
data = {
'username': username,
'userid': campaign_details.user_id,
'cpl': campaign_details.cpl,
'targat_quantity': campaign_details.target_quantity,
'ramaining_quantity': campaign_details.raimainingleads,
'approveleads': campaign_details.approveleads,
}
user_id = request.session['userid']
vendor_list = external_vendor.objects.filter()
for row in vendor_list:
list = ast.literal_eval(row.client_id)
if user_id in list:
print(row.user_id)
if campaign_allocation.objects.filter(client_vendor_id=row.user_id, cpl__gte=0, volume__gte=0, campaign_id=camp_id).count() != 1:
user_details = user.objects.get(id=row.user_id)
vendor_list_details.append({
'email': user_details.email,
'checked': '0',
'display': 'none',
'vendor_name': user_details.email,
'vendor_id': row.user_id,
'camp_id': camp_id,
})
return render(request, 'campaign/client_vendor_allocation.html', {'campdata': data, 'vendor_data': vendor_list_details})
@login_required
def insert_lead(request, camp_id, camp_alloc_id):
""" display lead header to upload indivdual lead """
labels = get_lead_header(camp_id)
labels +=join_custom_question_header(camp_id)
camp_alloc = campaign_allocation.objects.get(id=camp_alloc_id)
return render(request, 'campaign/addlead.html', {'lead_header': labels, 'camp_id': camp_id, 'camp_alloc_id': camp_alloc_id,'camp_alloc':camp_alloc})
# following code is submit asset data in to database kishor
def asset_submit(request):
""" to submit assets for campaign """
if request.POST:
t = Terms.objects.filter(campaign=request.POST.get('campaign'))
if t:
t[0].assets_name = request.POST.get('assets_name')
t[0].sponsers = request.POST.get('sponsers')
t[0].asset_distributor = request.POST.get('asset_distributor')
t[0].add_assetslink = request.POST.get('add_assetslink')
if t[0].assets_type:
t[0].assets_type = update_assets(
request, t[0].assets, ast.literal_eval(t[0].assets_type))
else:
file_dict = saving_assets(request, t[0].assets)
t[0].assets_type = file_dict
t[0].save()
data = {'status_code': 1}
return JsonResponse(data)
else:
data = {'status_code': 2, 'message': 'Campaign Not Found'}
return JsonResponse(data)
data = {'status_code': 2, 'message': 'Please Fill the detail'}
return JsonResponse(data)
def remove_asset(request):
""" remove links from campaign assets"""
term_data = Terms.objects.get(campaign_id=request.POST.get('camp_id'))
asset_type = request.POST.get('asset_type')
data = ast.literal_eval(term_data.assets_type)
if data:
if data[asset_type]['link']:
if request.POST.get('asset') in data[asset_type]['link']:
data[asset_type]['link'].remove(request.POST.get('asset'))
new_list = data[asset_type]['link']
if len(new_list) > 0:
data[asset_type].update({'link': new_list})
term_data.assets_type = data
term_data.save()
else:
if bool(data[asset_type]):
data[asset_type].pop('link',None)
if not bool(data[asset_type]):
data.pop(asset_type,None)
else:
data.pop(asset_type,None)
term_data.assets_type = data
term_data.save()
data = {'status_code': 1, 'message': 'asset removed'}
else:
data = {'status_code': 1, 'message': 'asset remove error'}
return JsonResponse(data)
def remove_file_asset(request):
""" remove files from campaign assets"""
term_data = Terms.objects.get(campaign_id=request.POST.get('camp_id'))
asset_type = request.POST.get('asset_type')
data = ast.literal_eval(term_data.assets_type)
if data:
if len(data[asset_type]['files']) > 0:
for file_dict in data[asset_type]['files']:
if request.POST.get('asset') == file_dict['url']:
data[asset_type]['files'].remove(file_dict)
new_list = data[asset_type]['files']
if len(new_list) > 0:
data[asset_type].update({'files': new_list})
term_data.assets_type = data
term_data.save()
else:
if bool(data[asset_type]):
data[asset_type].pop('files',None)
if not bool(data[asset_type]):
data.pop(asset_type,None)
else:
data.pop(asset_type,None)
term_data.assets_type = data
term_data.save()
data = {'status_code': 1, 'message': 'asset removed'}
else:
data = {'status_code': 1, 'message': 'asset remove error'}
return JsonResponse(data)
def script_submit(request):
""" to submit scripts for campaign """
camp_id = Scripts.objects.get_or_create(
campaign_id=request.POST.get('campaign'))
if request.FILES:
myfile = request.FILES['client_script']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
camp_id[0].client_script = filename
camp_id[0].save()
data = {'status': 1}
return JsonResponse(data)
data = {'status': 2}
return JsonResponse(data)
def vendor_script_submit(request):
""" upload vendor scripts """
camp_id = Scripts.objects.get_or_create(
campaign_id=request.POST.get('campaign'))
if request.FILES:
myfile = request.FILES['client_script']
fs = FileSystemStorage()
filename = fs.save(myfile.name, myfile)
uploaded_file_url = fs.url(filename)
camp_id[0].client_script = filename
camp_id[0].save()
data = {'status': 1}
return JsonResponse(data)
data = {'status': 2}
return JsonResponse(data)
def get_asset_specs(request):
""" to fetch uploaded assets """
asset = Terms.objects.get(campaign_id=request.POST.get('id'))
data = {
'name':asset.campaign.name,
'assets_name': asset.assets_name if asset.assets_name else 'None',
'assets': asset.assets if asset.assets else 'None',
'sponsers': asset.sponsers if asset.sponsers else 'None',
'assets_type': ast.literal_eval(asset.assets_type) if asset.assets_type else 'None',
'asset_distributor': asset.asset_distributor if asset.asset_distributor else 'None',
'add_assetslink': asset.add_assetslink if asset.add_assetslink else 'None',
}
return JsonResponse(data)
def get_scripts(request):
""" return uploaded scripts """
term_data = Scripts.objects.filter(campaign_id=request.POST.get('id'))
data = {}
if term_data:
data = {
'url': term_data[0].client_script.url if term_data[0].client_script.url else 'None', }
print(data)
return JsonResponse(data)
def get_agreements(request):
pass
@login_required
def campaign_notebook(request):
""" return all campaigns to the campaign notebook """
counter = Campaign.objects.filter(
user_id=request.session['userid']).count()
data = []
if counter > 0:
campaign_details = Campaign.objects.filter(
user_id=request.session['userid'])
pending_camp_count = Campaign.objects.filter(
user_id=request.session['userid'], status=3).count()
live_camp_count = Campaign.objects.filter(
user_id=request.session['userid'], status=1).count()
assigned_camp_count = Campaign.objects.filter(
user_id=request.session['userid'], status=5).count()
complete_camp_count = Campaign.objects.filter(
user_id=request.session['userid'], status=4).count()
return render(request, 'campaign/campaign_notebook.html', {'camps': campaign_details, 'pending_count': pending_camp_count,
'live_count': live_camp_count, 'assigned_count': assigned_camp_count, 'complete_count': complete_camp_count})
else:
return render(request, 'campaign/campaign_notebook.html', {})
def get_camp_data(request):
""" return campaign data to display on campaign notebook """
camp_details = Campaign.objects.get(id=request.POST.get('id'))
mapping = Mapping.objects.get(campaign_id=request.POST.get('id'))
aggrement = data_assesment.objects.get(user_id=request.session['userid'])
data = {
'name': camp_details.name,
'cpl': camp_details.cpl,
'volume': camp_details.target_quantity,
'camp_io':camp_details.io_number,
'rfq': camp_details.rfq,
'status': camp_details.status,
'rfq_timer': camp_details.rfq_timer,
'start_date': camp_details.start_date,
'end_date': camp_details.end_date,
'desc': camp_details.description,
'camp_id': request.POST.get('id'),
'sender_id': request.session['userid'],
'special_instr': mapping.special_instructions,
'nda': aggrement.nda_aggrement,
'msa': aggrement.msa_aggrement,
'gdpr': aggrement.gdpr_aggrement,
'dpa': aggrement.dpa_aggrement,
'io': aggrement.io_aggrement,
}
return JsonResponse(data)
def get_vendor_list(request):
""" return assigned vendor list """
camp_details = Campaign.objects.get(id=request.POST.get('camp_id'))
vendor_list = campaign_allocation.objects.filter(
campaign_id=camp_details.id, status__in=[1,4,5]).distinct()
vendor_id = []
vendor_data = []
for vendor in vendor_list:
if vendor.client_vendor_id not in vendor_id:
vendor_id.append(vendor.client_vendor_id)
vendor_data.append({
'id': vendor.client_vendor_id,
'vendor_name': vendor.client_vendor.user_name
})
return JsonResponse(vendor_data, safe=False)
@login_required
def all_lead_display(request, camp_id):
""" return all uploaded leads for a campaign """
all_list = []
all_header = []
all_lead_header = []
submitLead = 0
rejectedLead = 0
camp_alloc = campaign_allocation.objects.filter(campaign_id=camp_id)
if camp_alloc.count() > 0:
header = create_custom_header(camp_id, request.session['userid'])
campaign_details = Campaign.objects.get(id=camp_id)
for row in camp_alloc:
submitLead = submitLead + int(row.submited_lead)
rejectedLead = rejectedLead+int(row.return_lead)
data = {'camp_id': camp_id, 'camp_alloc_id': camp_id, 'camp_name': campaign_details.name, 'cpl': campaign_details.cpl,
'lead': campaign_details.target_quantity, 'submited_lead': submitLead, 'return_lead': rejectedLead}
if row.upload_leads != None:
list = ast.literal_eval(row.upload_leads)
if len(header) == 0:
for dict in list[0]:
all_header.append(dict)
all_lead_header.append(dict)
all_header = create_header(all_header)
else:
all_header = header
for dict in list[0]:
all_lead_header.append(dict)
all_header = create_header(all_header)
all_list.extend(list)
return render(request, 'campaign/client_lead_upload.html', {'campaigns': data, 'leadlist': all_list, 'all_lead_header': all_lead_header, 'all_header': all_header, 'header': header, 'camp_id': camp_id})
def get_rfq_cpl(request):
""" return rfq and cpl sent by superadmin """
campaign = Campaign_rfq.objects.get(campaign_id=request.POST.get('camp_id'))
data = {'cpl': campaign.cpl, 'volume': campaign.volume}
return JsonResponse(data)
def update_status_rfq(request):
""" process actions on rfq resopnce """
camp_id = request.POST.get('camp_id')
cpl = request.POST.get('cpl')
volume = request.POST.get('volume')
choose = request.POST.get('choose')
user_id = request.session['userid']
superadmin = user.objects.get(usertype_id=4)
if choose == '1':
accept_rfq_cpl_by_client(camp_id, cpl, volume, user_id, superadmin.id)
elif choose == '2':
remark = request.POST.get('remark')
counter_rfq_cpl_by_client(
camp_id, cpl, volume, user_id, remark, superadmin.id)
elif choose == '3':
rejected_rfq_cpl_by_client(
camp_id, cpl, volume, user_id, superadmin.id)
data = {'success': 1, 'msg': 'Action Submitted Successfully!...'}
return JsonResponse(data)
def accept_rfq_cpl_by_client(camp_id, cpl, volume, user_id, superadmin_id):
""" accept cpl and volume sent form superadmin """
# changes for rfq datatable
campaign = Campaign_rfq.objects.get(campaign_id=camp_id)
campaign.status = 3
campaign.save()
campaign = Campaign.objects.get(id=camp_id)
name = campaign.name
campaign.cpl = cpl
campaign.target_quantity = volume
campaign.raimainingleads = volume
campaign.rfq_status = 1
campaign.save()
title = 'Accept RFQ CPL'
desc = 'Accept RFQ CPL Request by client on '+str(name)
# noti_via_mail(superadmin_id, title, desc, 1)
RegisterNotification(user_id, superadmin_id, desc,
title, 1, campaign, None)
return True
def counter_rfq_cpl_by_client(camp_id, cpl, volume, user_id, remark, superadmin_id):
""" counter cpl or volume to superadmin """
campaign = Campaign_rfq.objects.get(campaign_id=camp_id)
name = campaign.campaign.name
campaign.old_cpl = campaign.cpl
campaign.old_volume = campaign.volume
campaign.cpl = cpl
campaign.volume = volume
campaign.remark = remark
campaign.status = 2
campaign.save()
campaign = Campaign.objects.get(id=camp_id)
campaign.rfq_status = 2
campaign.save()
title = 'Counter RFQ CPL'
desc = 'Counter RFQ CPL by client on '+str(name)
# noti_via_mail(sender_id,title, desc, 1)
RegisterNotification(user_id, superadmin_id, desc,
title, 1, campaign, None)
return True
def rejected_rfq_cpl_by_client(camp_id, cpl, volume, user_id, superadmin_id):
""" reject cpl and volume sent from superadmin """
campaign = Campaign_rfq.objects.get(campaign_id=camp_id)
campaign.status = 4
campaign.save()
campaign = Campaign.objects.get(id=camp_id)
name = campaign.name
title = 'Reject RFQ CPL'
desc = 'Reject RFQ CPL by client on '+str(name)
# noti_via_mail(sender_id, title, desc, 1)
RegisterNotification(user_id, superadmin_id, desc,
title, 1, campaign, None)
return True
@login_required
def RFQ_Campaign(request, camp_id):
""" to request rfq suggestion from external vendors """
campaign_details = Campaign.objects.get(id=camp_id)
data = {
'cpl': campaign_details.cpl,
'targat_quantity': campaign_details.target_quantity,
'ramaining_quantity': campaign_details.raimainingleads,
'camp_id': camp_id,
'camp_name': campaign_details.name,
'client_name': campaign_details.user.user_name,
}
list = []
vendor_list_details = []
user_id = request.session['userid']
marketing_method = source_touches.objects.filter(is_active=1)
cpl_counter = campaign_allocation.objects.filter(
campaign_id=camp_id, status=3, cpl=0, volume=-1).count()
cpl_list = campaign_allocation.objects.filter(
campaign_id=camp_id, status=3, cpl=0, volume=-1)
vendor_list = external_vendor.objects.filter()
for row in vendor_list:
print(row.user_id)
list = ast.literal_eval(row.client_id)
if user_id in list:
user_details = user.objects.get(id=row.user_id)
vendor_list_details.append(
{'userid': row.user_id, 'email': user_details.email, 'user_name': user_details.user_name})
return render(request, 'campaign/client_rfq_campaign.html', {'cpl_counter': cpl_counter, 'cpl_list': cpl_list, 'data': data, 'vendor_list': vendor_list_details})
def rfq_vendor_allocation(request):
""" to send request to external vendors """
ids = request.POST.getlist('ids[]')
camp_id = request.POST.get('camp_id')
userid = request.session['userid']
title = "New RFQ Campaign Allocated By"
desc = "Client"
for id in ids:
counter = campaign_allocation.objects.filter(
campaign_id=camp_id, client_vendor_id=id, cpl=-1, volume=-1, status=3).count()
if counter != 1:
campaign_allocation.objects.create(
campaign_id=camp_id, client_vendor_id=id, cpl=-1, volume=-1, status=3)
from client.utils import noti_via_mail
noti_via_mail(id, title, desc, mail_noti_new_campaign)
RegisterNotification(userid, id, desc, title, 2,None, campaign_allocation.objects.latest('id'))
data = {'success': 1}
return JsonResponse(data)
def update_rfq_cpl(request):
""" update rfq and cpl """
t = Campaign.objects.get(id=request.POST.get('camp_id'))
t.cpl = request.POST.get('cpl')
t.save()
data = {'success': 1, 'msg': 'CPL updated Successfully!...'}
return JsonResponse(data)
def counter_action_on_cpl(request):
""" counter cpl """
vendor_id = request.POST.get('vendor_id')
camp_alloc_id = request.POST.get('camp_alloc_id')
id = request.POST.get('id')
userid = request.session['userid']
cpl = request.POST.get('cpl')
if id == '1':
counter_action_accept_vendor(camp_alloc_id, vendor_id, userid, cpl)
data = {'success': 1, 'msg': 'Counter Request accepted...'}
elif id == '2':
counter_action_reject_vendor(camp_alloc_id, vendor_id, userid)
data = {'success': 1, 'msg': 'Counter Request Rejected...'}
return JsonResponse(data)
def counter_action_accept_vendor(camp_alloc_id, vendor_id, userid, cpl):
""" accept counter by external vendor """
camp = campaign_allocation.objects.get(id=camp_alloc_id)
name = camp.campaign.name
camp.cpl = cpl
camp.counter_status = 2
camp.save()
title = "Accept Counter by superadmin"
desc = "Accept CPL Counter on " + str(name)
from client.utils import noti_via_mail
noti_via_mail(vendor_id, title, desc, mail_noti_vendor_action_on_camp_request)
RegisterNotification(userid, vendor_id, desc, title, 1, None, camp)
return True
def counter_action_reject_vendor(camp_alloc_id, vendor_id, userid):
""" reject counter request """
camp = campaign_allocation.objects.get(id=camp_alloc_id)
name = camp.campaign.name
camp.counter_status = 2
camp.save()
title = "Reject Counter by superadmin"
desc = "Reject CPL Counter on " + str(name)
from client.utils import noti_via_mail
noti_via_mail(vendor_id, title, desc, mail_noti_vendor_action_on_camp_request)
RegisterNotification(userid, vendor_id, desc, title, 1, None, camp)
return True
def get_camp_specs(request):
""" get all campaign specifications """
camp_id = request.POST.get('id')
camp_data = Campaign.objects.get(id=camp_id)
# print(camp_data.__dict__)
term_data = Terms.objects.get(campaign_id=camp_id)
delivary_data = Delivery.objects.get(campaign_id=camp_id)
mapping_data = Mapping.objects.get(campaign_id=camp_id)
spec_data = Specification.objects.get(campaign_id=camp_id)
# use as text mapping
useAsTxtMapping = []
txt_mapping_exist = UseAsTxtMapping.objects.count()
if(txt_mapping_exist): # if records exist
useAsTxtMapping = UseAsTxtMapping.objects.filter(
campaign=camp_data).values() # list of dicts
print("useAsTxtMapping : ", useAsTxtMapping.values())
context = {
'campaign_id': camp_data.id,
'stat': camp_data.status,
'target_quantity': camp_data.target_quantity if camp_data.target_quantity else '',
'campaign_type': camp_data.get_type_display() if camp_data.type else '',
'outrich_method': camp_data.method.all().values_list('type',flat=True) if camp_data.method.all() else '',
'indursty_type': mapping_data.industry_type if mapping_data.industry_type else '',
'job_title': mapping_data.job_title if mapping_data.job_title else '',
'job_level': mapping_data.job_level if mapping_data.job_level else '',
'assets': term_data.assets if term_data.assets else '',
'delivery_method': delivary_data.delivery_method if delivary_data.delivery_method else '',
'country': mapping_data.country if mapping_data.country else '',
'company_size': mapping_data.company_size if mapping_data.company_size else '',
'revenue': mapping_data.revenue_size if mapping_data.revenue_size else '',
'data_field': delivary_data.data_header if delivary_data.data_header else '',
'custom_data_fields':delivary_data.custom_header if delivary_data.custom_header else '',
'tc_header_status':delivary_data.tc_header_status,
'custom_header_status':delivary_data.custom_header_status,
'pacing': spec_data.campaign_pacing if spec_data.campaign_pacing else '',
'instructions': mapping_data.special_instructions if mapping_data.special_instructions else '',
'useAsTxtMapping': useAsTxtMapping,
'rfq_status': camp_data.rfq,
'campaign':camp_data,
'mapping':mapping_data,
'Custom_question_status':mapping_data.custom_status,
'usertype':request.session['usertype']
}
# return JsonResponse(data)
html = render_to_response('campaign/show_campaign_specs.html', context)
print("html :", html)
return html
@login_required
def rejected_reason_list(request):
""" return reject lead reasons """
user_id = request.session['userid']
# global_reason=leads_rejected_reson.objects.filter(status=0)
client_reason = leads_rejected_reson.objects.filter(
status=1, user_id=user_id)
return render(request, 'lead/rejected_reason_list.html', {'reason': client_reason})
@login_required
def rectify_reason_list(request):
""" return rectify lead reasons """
user_id = request.session['userid']
client_reason = Leads_Rectify_Reason.objects.filter(
status=1, user_id=user_id)
return render(request, 'lead/rectify_reason_list.html', {'reason': client_reason})
@login_required
def individual_campaign_notebook(request, camp_id):
""" individual campaign notebook """
campaign_details = Campaign.objects.filter(id=camp_id)
live_camp_count = Campaign.objects.filter(id=camp_id, status=3).count()
print(live_camp_count)
vendor_list = get_vendor_list_of_campaign(camp_id)
if campaign_details:
return render(request, 'campaign/individual_campaign_notebook.html', {'camps': campaign_details, 'vendor_list': vendor_list,
})
else:
return render(request, 'campaign/individual_campaign_notebook.html', {})
# updating campaign end date from campaign notebook
@login_required
def update_campaign_end_date(request):
""" update campaign end date """
if Campaign.objects.filter(id=request.POST.get('camp_id')).update(end_date=request.POST.get('date')):
data = {'status': 1, 'message': 'date change successfully'}
else:
data = {'status': 2, 'message': 'date change failed'}
return JsonResponse(data)
@login_required
def update_campaign_start_date(request):
""" update campaign end date """
if Campaign.objects.filter(id=request.POST.get('camp_id')).update(start_date=request.POST.get('date')):
data = {'status': 1, 'message': 'date change successfully'}
else:
data = {'status': 2, 'message': 'date change failed'}
return JsonResponse(data)
@login_required
def TC_vendor_list(request):
""" TC vendor list """
vendor_list_details = []
user_id = request.session['userid']
campaigns = Campaign.objects.filter(user=user_id)
vendor_list = campaign_allocation.objects.filter(
campaign_id__in=campaigns, status__in=[1, 5, 4])
for row in vendor_list:
user_details = client_vendor.objects.filter(
user_id=row.client_vendor.id)
if user_details:
if user_details[0] not in vendor_list_details:
vendor_list_details.append(user_details[0])
return render(request, 'vendor1/TC_vendor_list.html', {'vendor_list': vendor_list_details})
def existing_vendor_list(request):
""" TC vendor who are currently working with client """
vendor_list_details = []
user_id = request.session['userid']
campaigns = Campaign.objects.filter(user=user_id)
vendor_list = campaign_allocation.objects.filter(
campaign_id__in=campaigns, status=1)
for row in vendor_list:
user_details = user.objects.filter(id=row.client_vendor.id)
if user_details:
if user_details[0] not in vendor_list_details:
vendor_list_details.append(user_details[0])
return render(request, 'vendor1/existing_vendor_list.html', {'vendor_list': vendor_list_details})
def get_cpl_list(request):
""" return cpl list to superadmin """
camp_id = request.GET.get('camp_id')
user_id = request.session['userid']
get_details = collect_campaign_details(camp_id)
vendor_list = []
tc_quote = []
vendor_list_id = get_external_vendors(user_id)
ext_vendor_list = campaign_allocation.objects.filter(campaign_id=camp_id,client_vendor_id__in=vendor_list_id, status=3, cpl__in=[-1,0], volume=-1)
for vendor in ext_vendor_list:
vendor_list.append({
'id':vendor.client_vendor_id,
'cpl':vendor.rfqcpl,
'volume':vendor.rfqvolume,
'name':vendor.client_vendor.user_name,
})
campaign = Campaign_rfq.objects.filter(campaign_id=camp_id)
for vendor in campaign:
if vendor.status == 1:
tc_quote.append({
'id':vendor.campaign_id,
'cpl':vendor.cpl,
'volume':vendor.volume,
'name':'TC TEAM',
})
vendor_data = []
vendorlist = campaign_allocation.objects.filter(campaign_id=camp_id, status=3,cpl=0)
for vendor in vendorlist:
vendor_data.append({
'id': vendor.client_vendor_id,
'vendor_name': vendor.client_vendor.user_name
})
rfq_timer = Campaign.objects.get(id=camp_id)
rfq_timer = rfq_timer.rfq_timer
# print(vendor_data)
data = {'rfq_timer': rfq_timer,'success': 1,'ext_vendor_list':vendor_list,'details':get_details,'tc_quote':tc_quote,'vendor_data':vendor_data }
return JsonResponse(data)
@login_required
def client_user_access(request):
""" return user access modules to client """
usertypes = usertype.objects.filter(type="external_vendor")
users = []
user_id = request.session['userid']
vendor_list = external_vendor.objects.filter()
for row in vendor_list:
list = ast.literal_eval(row.client_id)
if user_id in list:
user_details = user.objects.get(id=row.user_id)
users.append(user_details)
groups = Client_User_Group.objects.filter(group_owner_id=user_id)
return render(request, 'dashboard/client_user_access.html', {'type':usertypes,'groups':groups,'users':users})
@login_required
def get_user_access(request):
""" return selected user access """
current_user = user.objects.get(id=request.POST.get('userid'))
if request.POST.get('groupid'):
roles = User_Configuration.objects.filter(is_client =True,group__in=[request.POST.get('groupid')]).order_by('position')
access = []
for role in roles:
if current_user in role.user.all():
access.append({
'id': role.id,
'name': role.name,
'url': role.url,
'parent': role.parent.id if role.parent else None,
'checked': 1,
'groupname':role.group.filter(id=request.POST.get('groupid'))[0].group_name,
'groupid':role.group.filter(id=request.POST.get('groupid'))[0].id,
})
else:
access.append({
'id': role.id,
'name': role.name,
'url': role.url,
'parent': role.parent.id if role.parent else None,
'checked': 0,
'groupname':role.group.filter(id=request.POST.get('groupid'))[0].group_name,
'groupid':role.group.filter(id=request.POST.get('groupid'))[0].id,
})
data = {'success': 1, 'user_roles': access}
else:
roles = User_Configuration.objects.filter(is_client =True,user_type__in=request.POST.get('usertype_id')).order_by('position')
access = []
for role in roles:
if current_user in role.user.all():
access.append({
'id': role.id,
'name': role.name,
'url': role.url,
'parent': role.parent.id if role.parent else None,
'checked': 1,
'usertype':role.user_type.filter(id=request.POST.get('usertype_id'))[0].type if request.POST.get('usertype_id') else None,
})
else:
access.append({
'id': role.id,
'name': role.name,
'url': role.url,
'parent': role.parent.id if role.parent else None,
'checked': 0,
'usertype':role.user_type.filter(id=request.POST.get('usertype_id'))[0].type if request.POST.get('usertype_id') else None,
})
data = {'success': 1, 'user_roles': access}
return JsonResponse(data)
def user_and_groups(request):
""" display create group page """
groups = Client_User_Group.objects.filter(group_owner_id=request.session['userid'])
roles = User_Configuration.objects.filter(is_client =True,user=request.session['userid']).order_by('position')
return render(request,'client/user_groups.html',{'groups':groups,'roles':roles})
def add_group(request):
""" group add by client """
client_id = user.objects.get(id=request.session['userid'])
group_type = Client_User_Group.objects.create(group_name=request.POST.get('group'),group_owner=client_id)
data = {'success': 1}
return JsonResponse(data)
def delete_group(request):
""" group delete by client """
client_id = user.objects.get(id=request.session['userid'])
group_type = Client_User_Group.objects.get(id=request.POST.get('group_id'),group_owner=client_id).delete()
data = {'success': 1}
return JsonResponse(data)
def edit_group(request):
""" group add by client """
client_id = user.objects.get(id=request.session['userid'])
if request.method == 'POST':
client_grp = Client_User_Group.objects.get(id=request.POST.get('group_id'),group_owner=client_id)
if client_grp:
client_grp.group_name = request.POST.get('group')
client_grp.save()
data = {'success': 1}
else:
client_grp = Client_User_Group.objects.get(id=request.GET.get('group_id'),group_owner=client_id)
if client_grp:
data = {'success': 1,'group_name':client_grp.group_name}
else:
data = {'success': 2}
return JsonResponse(data)
def password_check(passwd):
    """ validate password strength: at least 8 characters with a digit, an uppercase letter, a lowercase letter and a special symbol """
    SpecialSym = ['$', '@', '#', '%', '^', '&', '*', '+', '-', '!']
    if len(passwd) < 8:
        return False
    if not any(char.isdigit() for char in passwd):
        return False
    if not any(char.isupper() for char in passwd):
        return False
    if not any(char.islower() for char in passwd):
        return False
    if not any(char in SpecialSym for char in passwd):
        return False
    return True
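# Example (illustrative values, not taken from the codebase):
#   password_check('Sunny#2024')  -> True   (>= 8 chars, digit, upper, lower, special symbol)
#   password_check('password')    -> False  (no digit, no uppercase, no special symbol)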
def add_user_to_group(request):
""" add user to group """
username = request.POST.get('username')
email = request.POST.get('email').lower()
pwd = request.POST.get('password')
site_url = settings.BASE_URL
group_id = request.POST.get('group')
client_id = user.objects.get(id=request.session['userid'])
client_group = Client_User_Group.objects.get(id=group_id,group_owner=client_id)
if client_group:
email_check = email_domain_check(email)
t = password_check(pwd)
if t is True:
if email_check['status'] == 1:
if user.objects.filter(email=email).count() == 0:
new_user = user.objects.create(email=email, user_name=username,password=pwd, is_active=1, usertype_id=6)
client_group.group_users.add(new_user)
client_group.save()
data = {'success': 1,'msg':f'{new_user.user_name} added to {client_group.group_name}'}
subject = "Invitation From Techconnetr"
html_message = render_to_string('email_templates/external_user_register_template.html', {
'username': email, 'client':client_id.user_name,'site_url': site_url, 'password': pwd})
plain_message = strip_tags(html_message)
from_email = settings.EMAIL_HOST_USER
to_list = [email]
if send_mail(subject, plain_message, from_email, to_list, fail_silently=True,html_message=html_message):
print('sent mail')
else:
                    data = {'success': 2, 'msg': 'User already exists'}
else:
data = email_check
else:
            data = {'success': 0, 'msg': 'Password must be at least 8 characters and include an uppercase letter, a lowercase letter, a digit and a special symbol!'}
else:
        data = {'success': 1, 'msg': 'Group does not exist'}
return JsonResponse(data)
@login_required
def get_group_access(request):
""" return selected user access """
user_group = Client_User_Group.objects.get(id=request.POST.get('groupid'),group_owner=request.session['userid'])
roles = User_Configuration.objects.filter(is_client =True,user=request.session['userid']).order_by('position')
access = []
for role in roles:
if user_group in role.group.all():
access.append({
'id': role.id,
'name': role.name,
'url': role.url,
'parent': role.parent.id if role.parent else None,
'checked': 1,
'usertype':request.session['usertype'],
})
else:
access.append({
'id': role.id,
'name': role.name,
'url': role.url,
'parent': role.parent.id if role.parent else None,
'checked': 0,
'usertype':request.session['usertype'],
})
data = {'success': 1, 'user_roles': access}
return JsonResponse(data)
def grant_group_access(request):
""" grant access to user """
data = request.POST.getlist('access_id[]')
usergroup = Client_User_Group.objects.get(id=request.POST.get('groupid'),group_owner=request.session['userid'])
for access_id in data:
access = User_Configuration.objects.get(id=access_id)
if (access.parent == None):
child = User_Configuration.objects.filter(parent_id=access_id)
if child.count() > 0:
if request.POST.get('parent_value') == 'true':
for role in child:
role.group.add(usergroup)
role.save()
print(role)
if User_Configuration.objects.filter(parent_id=role.id).count() > 0:
grand_child_access_call(role,access_id,usergroup,data,access)
else:
for role in child:
role.group.remove(usergroup)
role.save()
if User_Configuration.objects.filter(parent_id=role.id).count() > 0:
grand_child_access_call(role,access_id,usergroup,data,access)
# import pdb;pdb.set_trace()
if User_Configuration.objects.filter(parent_id=access_id,group__in=[usergroup]).count() == 0:
access.group.remove(usergroup)
else:
access.group.add(usergroup)
access.save()
else:
if usergroup not in access.group.all():
access.group.add(usergroup)
else:
access.group.remove(usergroup)
access.save()
data = {'success':1,'msg':f'Access Permission Changed For {usergroup.group_name}'}
return JsonResponse(data)
def grant_child_group_access(request):
""" child grant access to user """
data = request.POST.getlist('access_id[]')
usergroup = Client_User_Group.objects.get(id=request.POST.get('groupid'),group_owner=request.session['userid'])
for access_id in data:
access = User_Configuration.objects.get(id=access_id)
if (access.parent == None):
child = User_Configuration.objects.filter(parent_id=access_id)
for role in child:
if str(role.id) in data:
if usergroup not in role.group.all():
role.group.add(usergroup)
else:
role.group.remove(usergroup)
role.save()
if User_Configuration.objects.filter(parent_id=role.id).count() > 0:
grand_child_access_call(role,access_id,usergroup,data,access)
if User_Configuration.objects.filter(parent_id=access_id,group__in=[usergroup]).count() == 0:
access.group.remove(usergroup)
else:
access.group.add(usergroup)
access.save()
data = {'success':1,'msg':f'Access Permission Changed For {usergroup.group_name}'}
return JsonResponse(data)
def grant_grand_child_access(request):
""" grant grand child access permision to the group """
data = request.POST.getlist('access_id[]')
usergroup = Client_User_Group.objects.get(id=request.POST.get('groupid'),group_owner=request.session['userid'])
for access_id in data:
access = User_Configuration.objects.get(id=access_id)
if (access.parent == None):
child = User_Configuration.objects.filter(parent_id=access_id)
for sub_menu in child:
if str(sub_menu.id) in data:
grand_child = User_Configuration.objects.filter(parent_id=sub_menu.id)
for grand_menu in grand_child:
if str(grand_menu.id) in data:
if usergroup not in grand_menu.group.all():
grand_menu.group.add(usergroup)
else:
grand_menu.group.remove(usergroup)
grand_menu.save()
if User_Configuration.objects.filter(parent_id=sub_menu.id,group__in=[usergroup]).count() == 0:
sub_menu.group.remove(usergroup)
else:
sub_menu.group.add(usergroup)
if User_Configuration.objects.filter(parent_id=access_id,group__in=[usergroup]).count() == 0:
access.group.remove(usergroup)
else:
access.group.add(usergroup)
access.save()
data = {'success':1,'msg':f'Access Permission Changed For {usergroup.group_name}'}
return JsonResponse(data)
def get_group_users(request):
""" return members in a group """
group_users = []
group = Client_User_Group.objects.get(id=request.GET.get('group_id'),group_owner=request.session['userid'])
for users in group.group_users.all():
group_users.append({
'userid': users.id,
'name': users.user_name,
'email': users.email,
'status':users.is_active,
})
data = {'success':1,'group_users':group_users}
return JsonResponse(data)
def remove_group_user(request):
""" remove user from group and platform """
user.objects.get(id=request.POST.get('user_id')).delete()
data = {'success':1}
return JsonResponse(data)
def action_on_camp_by_client(status, lead_list, campaign_id, vendor_id, vendor, reason=None, status_code=None):
if CampaignTrack.objects.filter(campaign_id=campaign_id).exists():
camp_alloc = campaign_allocation.objects.get(campaign_id=campaign_id, client_vendor_id=vendor_id)
track = CampaignTrack.objects.get(campaign_id=campaign_id)
if status == 1:
status = 'approved'
elif status == 2:
status = 'rejected'
elif status == 3:
status = 'rectify'
        # load the existing action history (stored as a python literal string) or start a new one
        action_list = eval(track.client_action) if track.client_action_count > 0 else []
        d = {
            'type': 'action',
            'vendor_id': vendor_id,
            'vendor': vendor,
            'status': status,
            'lead_id': lead_list,
            'date': datetime.datetime.now().isoformat(),
            'reason': reason,
            'status_code': status_code,
            'vendor_percentage': vendorper(camp_alloc.id),
            'client_percentage': percentage(campaign_id),
        }
        action_list.append(d)
        track.client_action = action_list
        track.client_action_count += 1
        track.save()
|
[
"tejupatne231995@gmail.com"
] |
tejupatne231995@gmail.com
|
bc231e520ceb1357dc73d5477d07af529ef19a92
|
df9098c0167537c589b43e5d55cd36209a2e3478
|
/drivingsim_vav/fetch_gym/envs/fetch_env.py
|
193ceb8a1239b55cc1d6dac59051a561768fc3e1
|
[
"MIT"
] |
permissive
|
dsbrown1331/vav-icml
|
60ef7ef6947d2ec2b6faae4d5d0e40116f7fab05
|
90f40c2b5b52f3cc142ffd4e02bb82d88e1e221d
|
refs/heads/main
| 2023-06-04T14:30:39.451312
| 2021-06-24T22:34:46
| 2021-06-24T22:34:46
| 375,876,556
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 9,986
|
py
|
import numpy as np
from fetch_gym.envs import robot_env, rotations, utils
def goal_distance(goal_a, goal_b):
assert goal_a.shape == goal_b.shape
return np.linalg.norm(goal_a - goal_b, axis=-1)
class FetchEnv(robot_env.RobotEnv):
"""Superclass for all Fetch environments.
"""
def __init__(
self, model_path, n_substeps, gripper_extra_height, block_gripper,
has_object, target_in_the_air, target_offset, obj_range, target_range,
distance_threshold, initial_qpos, reward_type,
):
"""Initializes a new Fetch environment.
Args:
model_path (string): path to the environments XML file
n_substeps (int): number of substeps the simulation runs on every call to step
gripper_extra_height (float): additional height above the table when positioning the gripper
block_gripper (boolean): whether or not the gripper is blocked (i.e. not movable) or not
has_object (boolean): whether or not the environment has an object
target_in_the_air (boolean): whether or not the target should be in the air above the table or on the table surface
target_offset (float or array with 3 elements): offset of the target
obj_range (float): range of a uniform distribution for sampling initial object positions
target_range (float): range of a uniform distribution for sampling a target
distance_threshold (float): the threshold after which a goal is considered achieved
initial_qpos (dict): a dictionary of joint names and values that define the initial configuration
reward_type ('sparse' or 'dense'): the reward type, i.e. sparse or dense
"""
self.gripper_extra_height = gripper_extra_height
self.block_gripper = block_gripper
self.has_object = has_object
self.target_in_the_air = target_in_the_air
self.target_offset = target_offset
self.obj_range = obj_range
self.target_range = target_range
self.distance_threshold = distance_threshold
self.reward_type = reward_type
if model_path == 'fetch/reach.xml':
self.type = 'reach'
elif model_path == 'fetch/reach_test.xml':
self.type = 'reach_test'
else:
self.type = None
super(FetchEnv, self).__init__(
model_path=model_path, n_substeps=n_substeps, n_actions=3,
initial_qpos=initial_qpos)
# GoalEnv methods
# ----------------------------
def compute_reward(self, achieved_goal, goal, info):
# Compute distance between goal and the achieved goal.
try:
w = [-0.54, -0.10, 0.83]
table_pos = self.sim.data.get_body_xpos('table')
obstacle_pos = self.sim.data.get_body_xpos('boxobstacle')
goal_pos = self.sim.data.get_body_xpos('goal')
d = goal_distance(achieved_goal, goal)
dist_threshold = 0.35 # ensure that this is the same as in domain.py
x = achieved_goal # TODO: Gleb, this is end_effector position, correct?
goal_dist = 25 * -np.exp(
-np.sqrt((x[0] - goal_pos[0]) ** 2 + (x[1] - goal_pos[1]) ** 2 + (x[2] - goal_pos[2]) ** 2))
table_dist = 5 * -np.exp(-np.sqrt((x[2] - table_pos[2]) ** 2))
obstacle_dist = 40 * (1 - np.exp(
-np.sqrt((x[0] - obstacle_pos[0]) ** 2 + (x[1] - obstacle_pos[1]) ** 2 + (x[2] - obstacle_pos[2]) ** 2)))
final_goal_dist = 250 * np.exp(-d) if d < dist_threshold else 0
return np.array([goal_dist, table_dist, obstacle_dist]).dot(w)
except:
d = goal_distance(achieved_goal, goal)
if self.reward_type == 'sparse':
return -(d > self.distance_threshold).astype(np.float32)
else:
return -d
# RobotEnv methods
# ----------------------------
def _step_callback(self):
if self.block_gripper:
self.sim.data.set_joint_qpos('robot0:l_gripper_finger_joint', 0.)
self.sim.data.set_joint_qpos('robot0:r_gripper_finger_joint', 0.)
self.sim.forward()
def _set_action(self, action):
action = action.copy() # ensure that we don't change the action outside of this scope
pos_ctrl, gripper_ctrl = action[:3], 0
pos_ctrl *= 0.05 # limit maximum change in position
rot_ctrl = [1., 0., 1., 0.] # fixed rotation of the end effector, expressed as a quaternion
# rot_ctrl = action[3:]
gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
assert gripper_ctrl.shape == (2,)
if self.block_gripper:
gripper_ctrl = np.zeros_like(gripper_ctrl)
action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
utils.ctrl_set_action(self.sim, action)
utils.mocap_set_action(self.sim, action)
def _get_obs(self):
# positions
grip_pos = self.sim.data.get_site_xpos('robot0:grip')
dt = self.sim.nsubsteps * self.sim.model.opt.timestep
grip_velp = self.sim.data.get_site_xvelp('robot0:grip') * dt
robot_qpos, robot_qvel = utils.robot_get_obs(self.sim)
if self.has_object:
object_pos = self.sim.data.get_site_xpos('object0')
# rotations
object_rot = rotations.mat2euler(self.sim.data.get_site_xmat('object0'))
# velocities
object_velp = self.sim.data.get_site_xvelp('object0') * dt
object_velr = self.sim.data.get_site_xvelr('object0') * dt
# gripper state
object_rel_pos = object_pos - grip_pos
object_velp -= grip_velp
else:
object_pos = object_rot = object_velp = object_velr = object_rel_pos = np.zeros(0)
gripper_state = robot_qpos[-2:]
gripper_vel = robot_qvel[-2:] * dt # change to a scalar if the gripper is made symmetric
if not self.has_object:
achieved_goal = grip_pos.copy()
else:
achieved_goal = np.squeeze(object_pos.copy())
obs = np.concatenate([
grip_pos, object_pos.ravel(), object_rel_pos.ravel(), gripper_state, object_rot.ravel(),
object_velp.ravel(), object_velr.ravel(), grip_velp, gripper_vel,
])
return {
'observation': obs.copy(),
'achieved_goal': achieved_goal.copy(),
'desired_goal': self.goal.copy(),
}
def _viewer_setup(self):
body_id = self.sim.model.body_name2id('robot0:torso_lift_link')
lookat = self.sim.data.body_xpos[body_id]
for idx, value in enumerate(lookat):
self.viewer.cam.lookat[idx] = value
self.viewer.cam.distance = 2.5
self.viewer.cam.azimuth = 132.
self.viewer.cam.elevation = -14.
def _render_callback(self):
# Visualize target.
sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()
site_id = self.sim.model.site_name2id('target0')
self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]
self.sim.forward()
def _reset_sim(self):
self.sim.set_state(self.initial_state)
### COMMENT OUT WHEN RUNNING SIMULATION EXPERIMENTS -- ONLY REQUIRED FOR HUMAN EXP
# Setting initial state for Fetch Move
if self.type:
names = ["shoulder_pan_joint", "shoulder_lift_joint", "upperarm_roll_joint", "elbow_flex_joint",
"forearm_roll_joint", "wrist_flex_joint", "wrist_roll_joint"]
names = ["robot0:" + n for n in names]
values = [1.364, -0.294, -2.948, 0.906, -0.275, -1.206, 3.086]
if self.type == 'reach_test':
values[0] = -values[0]
for i in range(len(names)):
self.sim.data.set_joint_qpos(names[i], values[i])
# Randomize start position of object.
# if self.has_object:
# object_xpos = self.initial_gripper_xpos[:2]
# while np.linalg.norm(object_xpos - self.initial_gripper_xpos[:2]) < 0.1:
# object_xpos = self.initial_gripper_xpos[:2] + self.np_random.uniform(-self.obj_range, self.obj_range, size=2)
# object_qpos = self.sim.data.get_joint_qpos('object0:joint')
# assert object_qpos.shape == (7,)
# object_qpos[:2] = object_xpos
# self.sim.data.set_joint_qpos('object0:joint', object_qpos)
self.sim.forward()
return True
def _sample_goal(self):
goal = np.array([1.45, 0.75, 0.42])
return goal
# if self.has_object:
# goal = self.initial_gripper_xpos[:3]
# goal[0] += 0.06
# goal += self.target_offset
# goal[2] = self.height_offset
# else:
# goal = self.initial_gripper_xpos[:3]
# return goal.copy()
def _is_success(self, achieved_goal, desired_goal):
d = goal_distance(achieved_goal, desired_goal)
return (d < self.distance_threshold).astype(np.float32)
def _env_setup(self, initial_qpos):
for name, value in initial_qpos.items():
self.sim.data.set_joint_qpos(name, value)
utils.reset_mocap_welds(self.sim)
self.sim.forward()
# Move end effector into position.
gripper_target = np.array([-0.7, -0.3, -0.431 + self.gripper_extra_height]) + self.sim.data.get_site_xpos('robot0:grip')
gripper_rotation = np.array([1., 0., 1., 0.])
self.sim.data.set_mocap_pos('robot0:mocap', gripper_target)
self.sim.data.set_mocap_quat('robot0:mocap', gripper_rotation)
for _ in range(10):
self.sim.step()
# Extract information for sampling goals.
self.initial_gripper_xpos = self.sim.data.get_site_xpos('robot0:grip').copy()
if self.has_object:
self.height_offset = self.sim.data.get_site_xpos('object0')[2]
|
[
"dsbrown1331@gmail.com"
] |
dsbrown1331@gmail.com
|
3ba7356fdb9012933809c6514df3118f37343543
|
a2b20597759990445081057d35d113434cfcf970
|
/stubs/typeshed/typeshed/stdlib/multiprocessing/forkserver.pyi
|
df435f00ebe770e9d572a874b269089844b7dca9
|
[
"MIT",
"Apache-2.0"
] |
permissive
|
facebook/pyre-check
|
34059599c02b65605c574f13555229f3b931fd4e
|
fe8ccedc572cc1faa1fd01e9138f65e982875002
|
refs/heads/main
| 2023-09-03T19:10:11.587028
| 2023-09-02T07:40:35
| 2023-09-02T07:40:35
| 110,274,488
| 6,703
| 575
|
MIT
| 2023-09-13T17:02:32
| 2017-11-10T17:31:36
|
OCaml
|
UTF-8
|
Python
| false
| false
| 1,064
|
pyi
|
from _typeshed import FileDescriptorLike, Unused
from collections.abc import Sequence
from struct import Struct
from typing import Any
__all__ = ["ensure_running", "get_inherited_fds", "connect_to_new_process", "set_forkserver_preload"]
MAXFDS_TO_SEND: int
SIGNED_STRUCT: Struct
class ForkServer:
def set_forkserver_preload(self, modules_names: list[str]) -> None: ...
def get_inherited_fds(self) -> list[int] | None: ...
def connect_to_new_process(self, fds: Sequence[int]) -> tuple[int, int]: ...
def ensure_running(self) -> None: ...
def main(
listener_fd: int | None,
alive_r: FileDescriptorLike,
preload: Sequence[str],
main_path: str | None = None,
sys_path: Unused = None,
) -> None: ...
def read_signed(fd: int) -> Any: ...
def write_signed(fd: int, n: int) -> None: ...
_forkserver: ForkServer = ...
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
43479fbf0082baa527fe671a3c356aad32639f5d
|
4c84cdd3ac80b9121aab75a9950b713633876c86
|
/src/GetFromfileURL.py
|
86386b57f50d2f6b5232cf48fa1f34c8d67c6da0
|
[
"MIT"
] |
permissive
|
Yang-33/vjudge-atcoder-submitID
|
5a8d687211b0e5ac2965bb3cdf01f6a47ba679ad
|
5b87594322a337e6acb25c84470d273427413445
|
refs/heads/master
| 2021-05-03T04:32:15.053231
| 2018-02-08T15:04:16
| 2018-02-08T15:04:16
| 120,618,794
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 158
|
py
|
def GetfromfileURL(filename):
    """Read URLs from *filename*, one URL per line, and return them as a list."""
    URL = []
    with open(filename, 'r') as File:
        for line in File:
            URL.append(line)  # note: each entry keeps its trailing newline
    return URL
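# Quick usage sketch (hypothetical file name; callers typically strip() each entry
# before using the URL, since the trailing newline is preserved):
#   urls = GetfromfileURL('urls.txt')
#   for url in urls:
#       print(url.strip())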
|
[
"kasai.yuta0810@gmail.com"
] |
kasai.yuta0810@gmail.com
|
87755ae2bc42ff08eecb4df2ddff94c8c977fea3
|
62e58c051128baef9452e7e0eb0b5a83367add26
|
/edifact/D13A/REGENTD13AUN.py
|
0525003744529b5867628652bbb213f144f0eef8
|
[] |
no_license
|
dougvanhorn/bots-grammars
|
2eb6c0a6b5231c14a6faf194b932aa614809076c
|
09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d
|
refs/heads/master
| 2021-05-16T12:55:58.022904
| 2019-05-17T15:22:23
| 2019-05-17T15:22:23
| 105,274,633
| 0
| 0
| null | 2017-09-29T13:21:21
| 2017-09-29T13:21:21
| null |
UTF-8
|
Python
| false
| false
| 3,511
|
py
|
#Generated by bots open source edi translator from UN-docs.
from bots.botsconfig import *
from edifact import syntax
from recordsD13AUN import recorddefs
structure = [
{ID: 'UNH', MIN: 1, MAX: 1, LEVEL: [
{ID: 'BGM', MIN: 1, MAX: 1},
{ID: 'CUX', MIN: 0, MAX: 1},
{ID: 'PNA', MIN: 1, MAX: 99, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'COM', MIN: 0, MAX: 9},
]},
{ID: 'DOC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'COM', MIN: 0, MAX: 9},
{ID: 'GEI', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 99},
]},
{ID: 'MOA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'PAI', MIN: 0, MAX: 1},
{ID: 'FII', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'EVE', MIN: 1, MAX: 99, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
]},
{ID: 'DSI', MIN: 1, MAX: 99999, LEVEL: [
{ID: 'PNA', MIN: 0, MAX: 9},
{ID: 'REL', MIN: 0, MAX: 1},
{ID: 'RFF', MIN: 0, MAX: 9},
{ID: 'EVE', MIN: 1, MAX: 999, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 99},
{ID: 'REL', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'PNA', MIN: 0, MAX: 9},
{ID: 'RFF', MIN: 0, MAX: 999},
{ID: 'NAT', MIN: 0, MAX: 2},
{ID: 'PDI', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'COM', MIN: 0, MAX: 9},
{ID: 'QTY', MIN: 0, MAX: 99},
{ID: 'FTX', MIN: 0, MAX: 999},
{ID: 'ATT', MIN: 0, MAX: 99, LEVEL: [
{ID: 'GEI', MIN: 0, MAX: 1},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'MEA', MIN: 0, MAX: 1},
{ID: 'FTX', MIN: 0, MAX: 99},
]},
{ID: 'MOA', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
{ID: 'QTY', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 99},
]},
{ID: 'DOC', MIN: 0, MAX: 99, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'TAX', MIN: 0, MAX: 9, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'FTX', MIN: 0, MAX: 9},
]},
{ID: 'EMP', MIN: 0, MAX: 9, LEVEL: [
{ID: 'ATT', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
{ID: 'DTM', MIN: 0, MAX: 9},
]},
{ID: 'ICD', MIN: 0, MAX: 9, LEVEL: [
{ID: 'PNA', MIN: 0, MAX: 9, LEVEL: [
{ID: 'ADR', MIN: 0, MAX: 9},
{ID: 'LOC', MIN: 0, MAX: 9},
]},
]},
]},
]},
]},
{ID: 'UNT', MIN: 1, MAX: 1},
]},
]
|
[
"jason.capriotti@gmail.com"
] |
jason.capriotti@gmail.com
|
de135e77c6e7eb05a6ea85251c74670b01fd1e46
|
f78141cb2d1a8e10ee45739a0bc0f5a3b50182b1
|
/printpdfslides.py
|
c6e23000edc6bfdca6fce44195d7b856cc2802af
|
[] |
no_license
|
mprat/MEET-sessions
|
329a4193ac4e409b1702a8901f6d2db1516557d8
|
9295bd1b7379d30ef1acfc81af27f66736041d16
|
refs/heads/master
| 2021-01-17T09:33:36.008808
| 2014-06-24T08:36:52
| 2014-06-24T08:36:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 552
|
py
|
import io
import sys
if len(sys.argv) != 2:
print "type the name of the notebook as the command-line argument"
else:
notebook = sys.argv[1]
path = notebook + '.slides.html'
flag = u'@media print{*{text-shadow:none !important;color:#000 !important'
with io.open(path, 'r') as in_file:
data = in_file.readlines()
for i, line in enumerate(data):
if line[:64] == flag:
data[i] = data[i].replace('color:#000 !important;', '')
with io.open(path, 'w') as out_file:
out_file.writelines(data)
print "You can now print your slides"
|
[
"michele.pratusevich@gmail.com"
] |
michele.pratusevich@gmail.com
|
e7473bdf2d22f720b9d59aa51182d6e104d57893
|
d6e36762821f30c589d335e1cedb937280741739
|
/stubs/openpyxl/openpyxl/chart/pie_chart.pyi
|
a5ba7cb1f9e12bf770623d12b40f49dd651fb916
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
nobuggy/typeshed
|
4c20c86c14878b667e587a72fd2bdd25454413cf
|
d1bfd08b4bc843227d097decfd99d70272a1f804
|
refs/heads/master
| 2023-05-26T14:51:54.136347
| 2023-05-10T15:06:41
| 2023-05-10T15:06:41
| 72,130,850
| 0
| 0
| null | 2016-10-27T17:16:09
| 2016-10-27T17:16:09
| null |
UTF-8
|
Python
| false
| false
| 2,623
|
pyi
|
from _typeshed import Incomplete, Unused
from abc import abstractmethod
from typing_extensions import Literal
from openpyxl.chart.axis import ChartLines
from openpyxl.chart.label import DataLabelList
from openpyxl.descriptors.base import Alias, Typed
from openpyxl.descriptors.excel import ExtensionList
from openpyxl.descriptors.serialisable import Serialisable
from ._chart import ChartBase
class _PieChartBase(ChartBase):
varyColors: Incomplete
ser: Incomplete
dLbls: Typed[DataLabelList, Literal[True]]
dataLabels: Alias
__elements__: Incomplete
def __init__(self, varyColors: bool = True, ser=(), dLbls: DataLabelList | None = None) -> None: ...
@property
@abstractmethod
def tagname(self) -> str: ...
class PieChart(_PieChartBase):
tagname: str
varyColors: Incomplete
ser: Incomplete
dLbls: Incomplete
firstSliceAng: Incomplete
extLst: Typed[ExtensionList, Literal[True]]
__elements__: Incomplete
def __init__(self, firstSliceAng: int = 0, extLst: Unused = None, **kw) -> None: ...
class PieChart3D(_PieChartBase):
tagname: str
varyColors: Incomplete
ser: Incomplete
dLbls: Incomplete
extLst: Typed[ExtensionList, Literal[True]]
__elements__: Incomplete
class DoughnutChart(_PieChartBase):
tagname: str
varyColors: Incomplete
ser: Incomplete
dLbls: Incomplete
firstSliceAng: Incomplete
holeSize: Incomplete
extLst: Typed[ExtensionList, Literal[True]]
__elements__: Incomplete
def __init__(self, firstSliceAng: int = 0, holeSize: int = 10, extLst: Unused = None, **kw) -> None: ...
class CustomSplit(Serialisable):
tagname: str
secondPiePt: Incomplete
__elements__: Incomplete
def __init__(self, secondPiePt=()) -> None: ...
class ProjectedPieChart(_PieChartBase):
tagname: str
varyColors: Incomplete
ser: Incomplete
dLbls: Incomplete
ofPieType: Incomplete
type: Alias
gapWidth: Incomplete
splitType: Incomplete
splitPos: Incomplete
custSplit: Typed[CustomSplit, Literal[True]]
secondPieSize: Incomplete
serLines: Typed[ChartLines, Literal[True]]
join_lines: Alias
extLst: Typed[ExtensionList, Literal[True]]
__elements__: Incomplete
def __init__(
self,
ofPieType: str = "pie",
gapWidth: Incomplete | None = None,
splitType: str = "auto",
splitPos: Incomplete | None = None,
custSplit: CustomSplit | None = None,
secondPieSize: int = 75,
serLines: ChartLines | None = None,
extLst: Unused = None,
**kw,
) -> None: ...
|
[
"noreply@github.com"
] |
nobuggy.noreply@github.com
|
c2b924b0282404ab40c9b7abf7ed0cb453c95c82
|
c1c46664b27d051bfba4a9682d87aa222878ed4d
|
/example/hello.py
|
8fe668ebaf18a1f30404835ff037007a07f950ee
|
[] |
no_license
|
xen/sanic-jinja2
|
89c272f09c29d9b86bca6adbbd14ff78a01b8e9c
|
d47150c840484a45465be00350975901483e9950
|
refs/heads/master
| 2021-05-13T19:04:15.228612
| 2019-01-10T11:54:59
| 2019-01-10T11:54:59
| 116,883,658
| 1
| 0
| null | 2019-01-09T23:11:19
| 2018-01-09T23:42:27
|
Python
|
UTF-8
|
Python
| false
| false
| 1,103
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from sanic import Sanic
from sanic_session import InMemorySessionInterface
from sanic_jinja2 import SanicJinja2
app = Sanic()
jinja = SanicJinja2(app)
session = InMemorySessionInterface(cookie_name=app.name, prefix=app.name)
@app.middleware("request")
async def add_session_to_request(request):
# before each request initialize a session
# using the client's request
await session.open(request)
@app.middleware("response")
async def save_session(request, response):
# after each request save the session,
# pass the response to set client cookies
await session.save(request, response)
@app.route("/")
async def index(request):
request["flash"]("success message", "success")
request["flash"]("info message", "info")
request["flash"]("warning message", "warning")
request["flash"]("error message", "error")
request["session"]["user"] = "session user"
return jinja.render("index.html", request, greetings="Hello, sanic!")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8000, debug=True)
|
[
"xuzenglin@gmail.com"
] |
xuzenglin@gmail.com
|
a1b08ef48c0364a480cb6ad6021eabc885470856
|
c61145e8771724575f67ae5738dd6cbb9626a706
|
/user_profile/serializers.py
|
26c0f3f64190e52de4dff520d28ce9b21132e9d9
|
[] |
no_license
|
Seredyak1/test_task
|
1399dd082f4281ca6f72d036f4df4c1c6945dafe
|
a5d433b827df46ffa95dd6dd91245b204884674f
|
refs/heads/master
| 2020-04-16T08:03:04.521740
| 2019-01-16T09:33:47
| 2019-01-16T09:33:47
| 165,409,648
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
from django.contrib.auth.models import User
from rest_framework import serializers
class RegistrationSerializer(serializers.ModelSerializer):
"""Serializers registration requests and creates a new user."""
password = serializers.CharField(
max_length=32,
min_length=4,
write_only=True
)
class Meta:
model = User
fields = ['id', 'email', 'first_name', 'last_name', 'username', 'password']
read_only_fields = ('id', )
def create(self, validated_data):
return User.objects.create_user(**validated_data)
class UserSerializer(serializers.ModelSerializer):
"""Serializers detail fields about user"""
class Meta:
model = User
fields = ['id', 'email', 'first_name', 'last_name', 'username']
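# Minimal usage sketch (hypothetical view code, not part of this module):
#   serializer = RegistrationSerializer(data={'email': 'a@b.com', 'username': 'a', 'password': 'secret'})
#   if serializer.is_valid():
#       user = serializer.save()  # dispatches to create(), which calls User.objects.create_user()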
|
[
"sanya.seredyak@gmail.com"
] |
sanya.seredyak@gmail.com
|
a9c219c55a6c714a749abe05fee1b4855327675c
|
55736be550881db62a03e9dfe81307f57617b3e5
|
/noodles/serial/namedtuple.py
|
8300af856f66dfe106926fe907061ad0827d8496
|
[
"Apache-2.0"
] |
permissive
|
atzeus/noodles
|
a19dbf7d909b1714279f6864171005a84b1e37db
|
2d608de26cf6d4070dc1cbf6491419163e446d4b
|
refs/heads/master
| 2021-08-24T06:50:01.732948
| 2017-12-08T10:25:56
| 2017-12-08T10:25:56
| 113,578,184
| 0
| 0
| null | 2017-12-08T13:37:47
| 2017-12-08T13:37:47
| null |
UTF-8
|
Python
| false
| false
| 1,041
|
py
|
from .registry import (Registry, Serialiser)
from ..utility import (object_name, look_up)
class SerNamedTuple(Serialiser):
def __init__(self, cls):
super(SerNamedTuple, self).__init__(cls)
def encode(self, obj, make_rec):
return make_rec(tuple(obj))
def decode(self, cls, data):
return cls(*data)
class SerAutoNamedTuple(Serialiser):
def __init__(self):
super(SerAutoNamedTuple, self).__init__('<namedtuple>')
def encode(self, obj, make_rec):
return make_rec({
'name': object_name(type(obj)),
'data': tuple(obj)})
def decode(self, cls, data):
return look_up(data['name'])(*data['data'])
def is_namedtuple(obj):
return isinstance(obj, tuple) and hasattr(obj, '_fields')
def namedtuple_hook(obj):
if is_namedtuple(obj):
return '<namedtuple>'
else:
return None
def registry():
return Registry(
hooks={
'<namedtuple>': SerAutoNamedTuple()
},
hook_fn=namedtuple_hook)
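# Round-trip sketch (hypothetical; `make_rec` is normally supplied by the Registry
# machinery, here replaced by `dict` just to show the record's shape, and look_up()
# only resolves namedtuple classes that are importable by their dotted name):
#   from collections import namedtuple
#   Point = namedtuple('Point', ['x', 'y'])
#   ser = SerAutoNamedTuple()
#   rec = ser.encode(Point(1, 2), make_rec=dict)   # {'name': '<module>.Point', 'data': (1, 2)}
#   ser.decode(None, rec)                          # -> Point(x=1, y=2)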
|
[
"j.hidding@esciencecenter.nl"
] |
j.hidding@esciencecenter.nl
|
d0ce41cc1efceafd9623ff9a80888835debd0069
|
f247b81da7a7f2cf6f0359764fd67d49d7394596
|
/python/ca_model.py
|
e39803c363075632f68aee5c4a42e3450dc1cc6d
|
[] |
no_license
|
parameterized/neural-ca
|
2147b45a33d543032f56be4802e882488aef1f79
|
5bbfb056f18bf34858c06cbaf5848f09cb1fcb6d
|
refs/heads/master
| 2023-04-03T01:32:10.040110
| 2022-09-20T03:34:56
| 2022-09-20T03:34:56
| 240,561,234
| 2
| 1
| null | 2023-03-25T00:36:10
| 2020-02-14T17:16:35
|
Lua
|
UTF-8
|
Python
| false
| false
| 1,421
|
py
|
import numpy as np
import tensorflow as tf
from tensorflow.keras.layers import Conv2D
def get_living_mask(x):
alpha = x[:, :, :, 3:4]
return tf.nn.max_pool2d(alpha, 3, [1, 1, 1, 1], 'SAME') > 0.1
class CAModel(tf.keras.Model):
def __init__(self):
super().__init__()
self.model = tf.keras.Sequential([
Conv2D(128, 1, activation=tf.nn.relu, input_shape=(None, None, 16 * 3)),
Conv2D(16, 1, kernel_initializer='zeros')
])
self(tf.zeros([1, 3, 3, 16])) # build model
@tf.function
def perceive(self, x):
identity = np.float32([0, 1, 0])
identity = np.outer(identity, identity)
sobel_x = np.outer([1, 2, 1], [-1, 0, 1]) / 8.
sobel_x = sobel_x.astype('float32')
sobel_y = sobel_x.T
kernel = tf.stack([identity, sobel_x, sobel_y], -1)[:, :, None, :]
kernel = tf.repeat(kernel, 16, 2)
y = tf.nn.depthwise_conv2d(x, kernel, [1, 1, 1, 1], 'SAME')
return y
@tf.function
def call(self, x):
pre_life_mask = get_living_mask(x)
x_p = self.perceive(x)
dx = self.model(x_p)
update_mask = tf.random.uniform(tf.shape(x[:, :, :, :1])) < 0.5
x += dx * tf.cast(update_mask, tf.float32)
post_life_mask = get_living_mask(x)
life_mask = pre_life_mask & post_life_mask
return x * tf.cast(life_mask, tf.float32)
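# One-step usage sketch (hypothetical grid size; the state layout is
# [batch, height, width, 16 channels], with channel 3 as the alpha/"alive" channel):
#   ca = CAModel()
#   state = tf.zeros([1, 64, 64, 16])
#   state = ca(state)  # perceive -> 1x1 convs -> stochastic update -> life masking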
|
[
"troyk212@gmail.com"
] |
troyk212@gmail.com
|
1521683d07646c7e056b0d4a8d501fb5a9f6137d
|
d529a8f43f6bcac487f3ab9ad76c61aabb91a508
|
/Extra long factorials.py
|
ca054c3ceca1133a2c461ada6da5c92200f4596b
|
[
"MIT"
] |
permissive
|
abhisheks008/Developer-Students-Club-UEM-Kolkata-CP-Contest
|
e8aaeafa648b096acf7f208cf9bddf8f2a6d7d29
|
d141b8872fb3e8c0b37c182690f8df9385a58953
|
refs/heads/main
| 2023-01-12T01:56:22.615080
| 2020-11-14T03:34:30
| 2020-11-14T03:34:30
| 310,984,396
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
# Extra long factorials of DSC UEM Kolkata Competitive Programming Contest
# Solution in Python Programming Language
# Author Abhishek Sharma
# Date : 08.11.2020
from math import factorial as f
print (f(int(input())))
|
[
"noreply@github.com"
] |
abhisheks008.noreply@github.com
|
762a77d82a2ed0c4c1b8939e46c0ac75ff59cfe8
|
56db3cb3e93ea92604dad79371e233bcf1cffad9
|
/Greenspace/settings.py
|
63b8f5087d069fe5f6d5daa9d749195e5f372be5
|
[] |
no_license
|
devendra-pandey/Greenspace
|
52c2a0b6e9e9b133e45d188b50e1400e4ef304f9
|
d858a4c6f83771b12241a341e02f82e0f6194996
|
refs/heads/master
| 2022-04-14T23:09:12.541102
| 2020-02-14T17:15:20
| 2020-02-14T17:15:20
| 240,560,567
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,346
|
py
|
"""
Django settings for Greenspace project.
Generated by 'django-admin startproject' using Django 3.0.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'z1-e-_@!&hfi_)eue_v343bm2&#+2d&ybn0jdgvn4=te664@ro'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'green_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'allauth', # new
'allauth.account', # new
'allauth.socialaccount', # new
'stripe',
'crispy_forms',
'bootstrap4',
'plants_home',
'users.apps.UsersConfig',
'pages.apps.PagesConfig',
]
# Stripe and Braintree Settings
if DEBUG:
# test keys
STRIPE_PUBLISHABLE_KEY = 'pk_test_KmefFlTQ36doIiPF80ZACFPF00emHSn9bR'
STRIPE_SECRET_KEY = 'sk_test_WTOGrtgDVQX24LVIn6bvZjqW008FA2cKEK'
BT_ENVIRONMENT='sandbox'
BT_MERCHANT_ID='z4vj27p38hvdd245'
BT_PUBLIC_KEY='f2zwxc4qp3x8r4pw'
BT_PRIVATE_KEY='a36911908d94e2f1c45bc6875cb70306'
else:
# live keys
STRIPE_PUBLISHABLE_KEY = 'pk_test_KmefFlTQ36doIiPF80ZACFPF00emHSn9bR'
STRIPE_SECRET_KEY = 'sk_test_WTOGrtgDVQX24LVIn6bvZjqW008FA2cKEK'
FIXTURE_DIRS = (
'/Greenspace/Greenspace/plants_home/fixtures/',
)
AUTH_USER_MODEL = 'users.CustomUser'
# emaillogin_project/settings.py
# EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
"django.contrib.auth.backends.ModelBackend",
# `allauth` specific authentication methods, such as login by e-mail
"allauth.account.auth_backends.AuthenticationBackend",
)
SITE_ID = 1
ACCOUNT_SESSION_REMEMBER = True
ACCOUNT_SIGNUP_PASSWORD_ENTER_TWICE = False
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_UNIQUE_EMAIL = True
# ACCOUNT_ADAPTER = 'Greenspace.users.adapters.AccountAdapter'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST_USER = 'greenspace173@gmail.com'
EMAIL_HOST_PASSWORD = 'plotno.143'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'Greenspace.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'plants_home/templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'Greenspace.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.mysql',
# 'NAME': 'test12',
# 'USER': 'root',
# 'PASSWORD': 'spectre',
# 'HOST': 'localhost',
# 'PORT': '3306',
# }
# }
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = '/'
ACCOUNT_LOGOUT_REDIRECT_URL = '/'
|
[
"devendra.dpandey02@gmail.com"
] |
devendra.dpandey02@gmail.com
|
a42db2ea9b8fe08f8e0eec1d9a55e6ccac6aaeae
|
8f3751ae9249cd013d754ce8dc5e83ad4bbc6a02
|
/output.py
|
4c27ed09fedaf183261384cc1ffdf87eedbdd86b
|
[] |
no_license
|
ishidur/NeuralNetworkPlotter
|
b7bfc08afae21a554f28cf0046f1ef106ada9c68
|
4e3511a93ab2d5c0878cd68b6d9ff95ace84eb1a
|
refs/heads/master
| 2021-03-27T10:36:17.293905
| 2017-08-30T04:43:55
| 2017-08-30T04:43:55
| 101,734,943
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,397
|
py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from pylab import *  # provides pcolor, colorbar and pink used below
def sigmoid(x):
return 1 / (1 + np.exp(-x))
# Components of network(l=0,1,2,,,)
# neurons = np.array([2, 2, 1])
# str="2069,6.36879, 4.58608, 6.92113, 4.34442, -2.47962, -6.79904, 6.35838, -6.4943, -3.0653, 0.00999636"
# neurons = np.array([2, 3, 1])
# str="599,5.86581, -1.6873, 4.31575, 5.14747, 5.76717, 4.32641, -2.35129, -2.66479, -6.65975, 6.44388, 0.275338, -7.0809, -3.04433, 0.00990169"
# neurons = np.array([2, 4, 1])
# str="489,3.43153, 5.37924, 3.09733, -4.81061, 3.89932, -2.03991, 4.08778, 5.30856, -1.60353, -3.65043, -5.78128, -3.45817, 2.4249, 4.37504, -5.60206, 5.23461, -2.94186, 0.00974184"
# str="559,4.02681, 5.58551, -0.7994, 4.56916, 5.15392, 2.66137, 4.70171, 4.55158, -1.90552, -1.70361, -2.20378, -7.02321, 4.52788, 2.66148, 0.48565, -7.59381, -3.45308, 0.00998716"
# neurons = np.array([2, 2, 2, 1])
# str="1109,4.44087, 3.74287, 3.62482, 6.55838, -5.97092, -2.22584, 0.882932, 6.7804, 6.93495, 2.71067, -3.1506, -6.41817, 6.04266, -6.42694, -2.83843, 0.00994148"
# neurons = np.array([2, 3, 3, 1])
# str="659,2.94228, 6.20703, -2.15972, 3.93808, 0.541414, 5.70829, -4.48722, -3.09875, -2.88049, 0.410709, 2.58557, 5.1793, -3.65896, 5.98425, 2.29711, 5.80167, 0.759044, 2.06787, -3.63505, -2.65884, -6.25454, 5.50012, 5.79299, -6.6336, -2.97932, 0.00997591"
# neurons = np.array([2, 4, 4, 1])
# str="419,-1.74429, -0.864807, 2.92453, 6.19674, 4.60773, 4.9703, 2.47731, -0.3114, -1.48083, -1.94901, -1.82405, -2.70423, 1.29178, 1.90646, 3.67907, 3.17465, 1.96039, 2.25654, 3.70381, 2.85224, 3.2566, 1.70034, -0.0547112, -0.998359, 4.10569, 1.0241, -2.77683, -5.12003, -2.3828, -2.10662, 0.017373, -2.54969, 3.65127, -0.0783893, -5.69896, 6.04371, -0.56462, 0.00975528"
neurons = np.array([2, 3, 3, 3, 1])
str="9 -0.562377 -0.275068 6.08234 4.46247 4.61822 0.283341 -1.96811 -2.19457 -3.17061 2.26009 1.31546 2.51885 2.41162 1.15844 2.64848 1.26812 -5.03426 0.406915 -2.35325 1.09903 -2.36015 0.256109 2.36763 1.99073 4.24761 -2.63358 0.657033 1.2347 2.28027 2.57662 -2.88671 -1.32138 -2.70352 0.117933 0.0294466 0.0970494 -0.0267851 0.500658"
# str = "489,-0.984455, 6.36258, 1.31512, 5.34308, -0.259602, 3.90947, -2.3082, -2.99422, -2.51795, 3.53831, 2.97514, -0.236202, -0.607421, -3.18126, 5.8468, 2.93817, 1.88244, 1.20371, -2.67916, -1.07294, -3.15374, 4.6581, 1.7776, 4.60723, 3.82906, 3.63762, 0.860046, -2.19369, -5.37426, 5.4209, -0.674578, -2.15118, -2.18974, -5.85983, 5.7832, 4.95376, -1.68951, 0.00979133"
# str = "509,-1.70781, -0.173122, 6.26719, 4.03829, 5.46069, 0.332685, -1.46569, -2.68219, -3.26497, 2.72309, 2.70356, -0.798178, 3.97015, 2.84702, 0.575163, 0.696121, -3.09378, 5.53267, -3.29616, -1.55952, -2.40554, 1.96858, 4.14752, 4.92738, 4.21106, 0.681107, 3.92218, -4.62442, 5.4875, -1.79962, -3.12863, -2.17762, -1.15241, 6.11073, 4.92902, -5.754, -1.81388, 0.00969941"
# neurons = np.array([2, 4, 4, 4, 1])
# str="409,-0.697841, -0.897475, 5.89155, 2.80258, 4.32837, 4.94154, -0.0758867, 1.80829, -1.87105, -2.07275, -2.86402, -2.39909, 2.84368, -0.299338, 2.03284, 2.06372, 3.15449, -0.234366, 2.27524, 2.24656, -0.858545, 4.18735, 0.819846, -3.43302, 0.793896, 2.29869, 1.3733, -0.989186, -2.66895, -2.69498, -2.75059, -0.316513, 2.92055, 2.15683, 3.23589, 3.96045, -0.395392, -4.30458, 4.23023, -2.10206, 1.7894, 0.402521, 4.02238, 1.7857, 1.19637, 3.48859, -1.19052, 3.0998, -1.77703, -2.83522, -2.14077, -0.856841, -1.99668, 5.88102, 4.98794, -4.54769, -1.24244, 0.00964238"
# neurons = np.array([2, 3, 3, 3, 3, 1])
# str="1669,2.99655, 5.45868, -0.871209, 2.45073, 1.81667, 6.21747, -3.67208, -1.95684, -1.94578, 3.4826, 2.06384, 2.07905, 5.06989, 0.28875, 0.754481, 0.95933, 2.81964, 2.38933, -1.7991, -3.83407, -3.94271, 5.73188, -0.439558, -2.27671, 2.1846, 2.73404, 3.24917, 2.38703, 2.75653, 3.11856, -3.10315, -2.82318, -1.26783, -2.63199, 5.57446, -3.06424, 3.04046, 3.45285, 3.4489, 3.56043, 1.38017, 3.99581, 0.219994, -3.39757, -0.0944336, -3.16128, 4.8351, -4.39442, -0.271265, 0.00887238"
# neurons = np.array([2, 4, 4, 4, 4, 1])
# str="569,-1.40349, 4.73401, 4.39454, 0.595671, 4.97564, 0.42259, 0.771733, 4.30004, -1.80419, -2.58622, -2.56936, -2.5129, 1.83581, 3.4392, -0.204556, 0.398763, 0.792568, -1.43016, 2.91771, 2.28341, 0.879131, -1.14803, 2.79282, 2.22344, 2.18047, 3.13294, 0.705359, 1.13135, -2.82744, -1.78581, -3.03655, -2.89323, 1.03061, 1.89421, 2.23595, 2.15343, -2.15692, 0.608627, 3.56931, 3.36424, 4.22728, 2.43007, -0.47886, -0.399374, 3.16718, 2.28146, 0.342999, 0.369826, -3.10989, -2.88342, -2.5328, -2.45013, 2.24708, 5.1517, 3.20658, -3.59481, 3.07095, 1.6612, 3.52622, -0.620736, 2.52098, -0.216395, 2.42076, 4.33917, 2.43201, -0.209252, 2.35339, 4.06447, -2.22535, -3.26955, -2.67393, 0.342861, 4.07401, -5.84408, 5.11776, -5.81011, -0.2846, 0.00981299"
# str="439,-0.287801, -0.370451, 6.29973, 0.653061, 4.23102, 3.71699, 0.0611724, 3.48873, -2.04225, -1.74314, -3.15629, -2.23772, 1.43904, 1.49788, 0.789733, 2.19032, 1.17484, 1.23724, 0.882246, 1.98111, 2.07095, 1.81887, -5.23016, -1.6156, 1.52207, 1.54486, 0.0823191, 1.76094, -2.96707, -2.88946, 1.40339, -1.80337, 1.79505, 1.7683, 2.27846, -1.325, 1.89539, 1.86622, 2.15057, -1.06869, -0.0734473, -0.0853545, -3.63634, 4.84367, 2.74926, 2.69572, 0.537416, 2.14108, -2.99542, -2.92206, -1.37465, -1.79248, 2.44345, 2.47911, 1.84345, 3.66819, 2.39081, 2.42612, 1.7734, 3.6006, -1.05825, -1.12896, 3.9256, 2.71051, 3.5861, 3.65812, -3.99355, -1.97618, -1.09381, -1.11789, -3.86522, -1.3513, -2.39412, -2.44732, -5.81172, 6.0477, 1.40761, 0.00965746"
# via Excel
b = [float(k) for k in str.split()]  # split on any whitespace (handles tab- or space-separated paste)
# via Sublime
# b = [float(k) for k in str.split(',')]
weights = []
biases = []
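# Unpack the flat parameter list b (index 0 is skipped) into per-layer weight matrices and bias column vectors.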
iter = 1
for i in range(len(neurons) - 1):
m = []
for j in range(neurons[i]):
m.append(b[iter:iter + neurons[i + 1]])
iter += neurons[i + 1]
biases.append(np.matrix(b[iter:iter + neurons[i + 1]]).transpose())
iter += neurons[i + 1]
weights.append(np.matrix(m).transpose())
def calcOutput(x, y):
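    # Forward-propagate a single (x, y) input through every layer and return the scalar network output.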
inputs = weights[0] * np.matrix([[x], [y]]) + biases[0]
for i in range(len(neurons) - 2):
outputs = sigmoid(inputs)
inputs = weights[i + 1] * outputs + biases[i + 1]
outputs = sigmoid(inputs)
return outputs[0, 0]
def meshgrid_propagation_forMap(layersArray):
N = 100
xyRange = 1.0
    # build the grid
x = np.linspace(0, xyRange, N)
y = np.linspace(0, xyRange, N)
    X, Y = np.meshgrid(x, y)  # construct the matrices that describe the grid region
Z = np.array(list(map(lambda x, y: list(map(calcOutput, x, y)), X, Y)))
pcolor(X, Y, Z)
colorbar()
pink()
# fig = plt.figure()
# ax = Axes3D(fig)
# ax.plot_surface(X, Y, Z, rstride=1, cstride=1,cmap=cm.coolwarm)
# ax.plot_wireframe(X,Y,Z)
plt.xlabel('Input1', fontsize=18)
plt.ylabel('Input2', fontsize=18)
# ax.set_zlabel('O')
plt.xlim(0, 1.0)
plt.ylim(0, 1.0)
plt.legend(fontsize=18)
plt.tick_params(labelsize=18)
plt.grid()
# plt.savefig("time"+str(learningTime)+"Neuron"+str(neurons)+"samp"+str(samplingTime)+"rand"+str(maxOfRandom)+"r"+str()+".png")
plt.show()
meshgrid_propagation_forMap(neurons)
|
[
"ishidu-ryota-fz@ynu.jp"
] |
ishidu-ryota-fz@ynu.jp
|
08605913b8523532604ffdc39e1ce0712d36ba0a
|
282ecc293288f6ea84abe66c84f98fe2dd9f0835
|
/Project_2/pipelines/simple_preprocessors.py
|
23c9588e6ea656867a09ff0fd88b256a4d68af33
|
[
"MIT"
] |
permissive
|
TitoGrine/IART_Project
|
b61d771e2389164833d280d6306bb4cddc9f5dea
|
a387cface38473fa90e132207847887b43a69cec
|
refs/heads/master
| 2022-12-15T21:04:24.315639
| 2020-09-19T14:43:21
| 2020-09-19T14:43:21
| 242,172,263
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 554
|
py
|
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.pipeline import make_pipeline
from preprocessors.lemmatization import Lemmatization
from preprocessors.tokenizer import Tokenizer
from preprocessors.utils import fit
def simple_pipeline(x, y, clsf):
model = make_pipeline(Tokenizer(preserve_case=False, strip_handles=False), Lemmatization(),
TfidfVectorizer(lowercase=False, tokenizer=lambda _: _))
vectorized_x = model.fit_transform(x, y)
return fit(vectorized_x, y, clsf, oversample=True)
|
[
"andrefmrocha@live.com.pt"
] |
andrefmrocha@live.com.pt
|
17630cb458f4d2ca573790240cf6ab249946ed4d
|
0c6e76b147e165f0108a6371fb0ec728b8c52ec4
|
/data.py
|
7e2bfeebea6c795d237072811201073c6201daab
|
[
"MIT"
] |
permissive
|
delwende/inflected-abbreviation-expansion
|
a1c4908c77388bf2bdaa4b4a5705b8ef1b6ce67c
|
850c8490ec16e4de6e469912397e8e91b5bbba74
|
refs/heads/master
| 2020-03-19T12:46:43.324277
| 2018-06-06T17:16:10
| 2018-06-06T17:16:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,406
|
py
|
import gzip
from copy import copy
from itertools import chain
from pathlib import Path
from xml.etree.ElementTree import iterparse
def overlapping_grouper(iterable, n, prefix_symbol=('', ''), suffix_symbol=('', '')):
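    # Yield overlapping windows of length n over the iterable, padded at the edges with prefix/suffix symbols.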
queue = [prefix_symbol] * n
offset = int(n // 2)
for idx, item in enumerate(chain(iterable, [suffix_symbol] * offset)):
queue.pop(0)
queue.append(item)
if idx >= offset:
yield copy(queue)
def retrieve_words_to_abbreviate(path):
with open(path) as expansions_file:
expansions = {parts[0]: parts[1:] for parts in map(lambda l: l.strip().split(';'), expansions_file)}
return frozenset(chain.from_iterable(expansions.values()))
def find_morphosyntax_files(path):
path = Path(path)
return [path] if path.is_file() else path.glob('*/**/ann_morphosyntax.xml*')
def parse_annotated_sentences(annotation_xml):
# Helper functions to retrieve the XML sub-element with the same name for a given element
# Doesn't seem readable at first but it's the same as the XML
def orth(fs_elem):
return fs_elem[0][0].text
def pos(fs_elem):
disamb = [e for e in fs_elem if e.get('name', '') == 'disamb'][0]
return disamb[0][1][0].text.split(':', maxsplit=1)[1]
def fs(seg_elem):
return seg_elem[0]
opener = gzip.open if annotation_xml.suffix == '.gz' else open
with opener(annotation_xml, encoding='utf-8') as ann_f:
for event, elem in iterparse(ann_f):
if tag_uri_and_name(elem)[1] == 's': # just parsed a sentence
sent = [(orth(fs(seg)), pos(fs(seg))) for seg in list(elem)]
yield sent
def sentences_with_abbreviations(corpus_path, abbreviable_words):
for f in find_morphosyntax_files(corpus_path):
for sentence in parse_annotated_sentences(f):
target_abbreviations = ((word, tag, pos) for pos, (word, tag) in enumerate(sentence) if
word.lower() in abbreviable_words)
for abbreviation in target_abbreviations:
yield sentence, abbreviation
def tag_uri_and_name(elem):
"""https://stackoverflow.com/questions/1953761/accessing-xmlns-attribute-with-python-elementree"""
if elem.tag[0] == "{":
uri, ignore, tag = elem.tag[1:].partition("}")
else:
uri = None
tag = elem.tag
return uri, tag
|
[
"petezor@gmail.com"
] |
petezor@gmail.com
|
81a36f9edec913d3549c0af94feff76ee65ac580
|
05b9f3aa55a4fb89c9474d77bc58cfe18d800622
|
/test2.py
|
dbedeafdbae3710983de3e17c0c3929be4398f78
|
[] |
no_license
|
LezardWhiteGlint/Practice_backup
|
718c5608982ac5a1b6c3d74d60327753792282a9
|
c0b4475684cbb0e131e40ad7b116162c4f7529c9
|
refs/heads/master
| 2020-03-25T20:17:37.125541
| 2019-03-04T07:17:39
| 2019-03-04T07:17:39
| 144,124,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 795
|
py
|
def ulti_display(weeklist, daily_h, date):
    total = 0
    data = []
    message = []
    for weekday in weeklist:
        for i in range(len(date)):
            if weekday == date[i]:
                data.append(daily_h[i])
                total = total + daily_h[i]
    #print "<html><body>"
    for i in data:
        hours = int(i/2)
        minutes = int(i%2)*30
        message.append(str(hours)+':'+str(minutes))
    total_h = int(total/2)
    total_m = int(total%2)*30
    total_message = str(total_h)+':'+str(total_m)
    try:
        for i in range(len(weeklist)):
            print('<p>Date: '+weeklist[i]+'-------'+message[i]+'</p>')
            print("\n")
        print('<p>This Week Total: '+total_message+'</p>')
    except IndexError:
        pass
    # print "</body></html>"
|
[
"30715385+LezardWhiteGlint@users.noreply.github.com"
] |
30715385+LezardWhiteGlint@users.noreply.github.com
|
a30be66391f29c734f4450832265dd84031abf3d
|
0a69dbc358b7114285b983ff435825cfc9f6a04a
|
/recipes1.py
|
86b855f28b61a0d39c6558b61081d14418d13263
|
[] |
no_license
|
jrovegno/MachineLearningRecipes
|
9711f429a676d52ab967bc77ceb2ae5159722837
|
3d2a9fd7e2ad25febbb22b33e80a3dd3b501e86b
|
refs/heads/master
| 2021-01-19T03:03:10.755268
| 2016-07-20T21:52:13
| 2016-07-20T21:52:13
| 63,817,073
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 319
|
py
|
import numpy as np
from sklearn import tree
from sklearn.datasets import load_iris
features_dict = {1:'smooth', 0:'bumpy'}
labels_dict = {1:'orange', 0:'apple'}
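# each feature row pairs a numeric measurement with a texture flag encoded per features_dict; labels follow labels_dict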
features = [[140,1],[130,1],[150,0],[170,0]]
labels = [0,0,1,1]
clf = tree.DecisionTreeClassifier()
clf.fit(features, labels)
print clf.predict([[150, 0]])
|
[
"javier.rovegno@gmail.com"
] |
javier.rovegno@gmail.com
|
dfce5b807530a3a4f49147428f9734961831ee7d
|
b8fe8b4bbd4694d2636c31643252fac4b62d1fe3
|
/Desktop/Mhakave/APp/src/Home/admin.py
|
36ca4d384276f60072b9f3c946d434c25043ba26
|
[] |
no_license
|
indraw705/django
|
f7339ab7f919522c0bc58792365562a03630f08d
|
3e05dde353185f1d5bf7139b2a5d582ec050ed0b
|
refs/heads/master
| 2021-01-12T04:12:29.087198
| 2017-02-09T14:42:54
| 2017-02-09T14:42:54
| 77,545,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 308
|
py
|
from django.contrib import admin
# Register your models here.
from .forms import SignUpForm
from .models import SignUp
class SignUpAdmin(admin.ModelAdmin):
list_display = ["__unicode__", "timestamp", "updated"]
form = SignUpForm
# class Meta:
# model = SignUp
admin.site.register(SignUp,SignUpAdmin)
|
[
"="
] |
=
|
cce6ffb32fcbb73d9c09532d13b9173fde291e94
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/FJZJJMM/YW_FJZJJMM_SZSJ_103.py
|
2be689302212420534b553c97e1696e9d1e56c5c
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,091
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FJZJJMM_SZSJ_103(xtp_test_case):
# YW_FJZJJMM_SZSJ_103
def test_YW_FJZJJMM_SZSJ_103(self):
title = '深圳A股股票交易日五档即成转撤销买——数量溢出(100亿)'
        # Define the expected values for the current test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel reported,
        # partially cancelled, reported pending cancel, cancelled, rejected, cancel-rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and do not need to be changed
case_goal = {
'期望状态': '废单',
'errorID': 11000107,
'errorMSG': queryOrderErrorMsg(11000107),
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameter information ------------------------------------------
        # Parameters: ticker code, market, security type, security status, trading status, side (B buy / S sell), expected status, Api
stkparm = QueryStkPriceQty('999999', '2', '24', '2', '0', 'B', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':2,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_BEST5_OR_CANCEL'],
'price': stkparm['涨停价'],
'quantity': 10000000000,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
f6c75d42e11df14d53847eeda1c2906c9ef684e5
|
9ff036f0cb2c5a9850af69181d016f5ca11ab63d
|
/week-02/day-1/square.py
|
6532ab2375ca22bfe3282718631617a9636490c2
|
[] |
no_license
|
balintnem3th/balintnem3th
|
bd48f8aa7e79c718049f50e52bdd6be9ccf7b251
|
58bf033314466d96ed5b81b3642f04149fc463b0
|
refs/heads/master
| 2021-05-11T08:06:20.545375
| 2018-03-25T23:58:32
| 2018-03-25T23:58:32
| 118,041,790
| 0
| 0
| null | 2018-01-18T21:42:43
| 2018-01-18T21:42:43
| null |
UTF-8
|
Python
| false
| false
| 170
|
py
|
user_input= int(input("Give me a number:"))
print('%'* user_input)
for index in range(1,user_input-1):
print('%'+ ((user_input-2)* ' ') + '%')
print('%'* user_input)
|
[
"balint.nem3th@gmail.com"
] |
balint.nem3th@gmail.com
|
bb1475121d62a7908a964aaaf795d0eb6cac5093
|
2e47a9f02dbc1d88411a630994b8f8d092038cfe
|
/dragon/bin/pip2.7
|
1573dec81e5b175435ee416f37a8cd98852c67c7
|
[] |
no_license
|
mathewcmartin/dragon
|
1386357eb99904483c9a33ff0370087303ecfb58
|
0aac30f90c8e553de11decf9a36084fa976c2c7a
|
refs/heads/master
| 2021-01-01T20:06:55.403634
| 2017-08-01T20:55:00
| 2017-08-01T20:55:00
| 98,766,185
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 243
|
7
|
#!/home/mathewcmartin/Projects/dragon/dragon/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
|
[
"chactasstryker@gmail.com"
] |
chactasstryker@gmail.com
|
b22ef7dcc0d2a7bdd70dc2123f3593c6c0156719
|
3df8c9f513627187311c67260e8e28d02825eab0
|
/src/transformers/utils/__init__.py
|
42e856d9e4acb3d5c4ec7dc5b24b3d6cb0b7649f
|
[
"Apache-2.0"
] |
permissive
|
ondewo/transformers
|
9245322d33e09bf4d4529375c40ea2373d9701bd
|
45bd44f215eb0a42d2b5994548756d506750b9d3
|
refs/heads/main
| 2023-05-08T11:59:01.284482
| 2023-05-07T14:12:23
| 2023-05-07T14:12:23
| 637,418,744
| 0
| 0
|
Apache-2.0
| 2023-09-04T14:23:00
| 2023-05-07T13:56:14
|
Python
|
UTF-8
|
Python
| false
| false
| 6,277
|
py
|
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bs4_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectron2_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_py3nvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tf2onnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bf16_available,
is_torch_bf16_cpu_available,
is_torch_bf16_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tf32_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
torch_version,
)
WEIGHTS_NAME = "pytorch_model.bin"
WEIGHTS_INDEX_NAME = "pytorch_model.bin.index.json"
TF2_WEIGHTS_NAME = "tf_model.h5"
TF2_WEIGHTS_INDEX_NAME = "tf_model.h5.index.json"
TF_WEIGHTS_NAME = "model.ckpt"
FLAX_WEIGHTS_NAME = "flax_model.msgpack"
FLAX_WEIGHTS_INDEX_NAME = "flax_model.msgpack.index.json"
SAFE_WEIGHTS_NAME = "model.safetensors"
SAFE_WEIGHTS_INDEX_NAME = "model.safetensors.index.json"
CONFIG_NAME = "config.json"
FEATURE_EXTRACTOR_NAME = "preprocessor_config.json"
IMAGE_PROCESSOR_NAME = FEATURE_EXTRACTOR_NAME
GENERATION_CONFIG_NAME = "generation_config.json"
MODEL_CARD_NAME = "modelcard.json"
SENTENCEPIECE_UNDERLINE = "▁"
SPIECE_UNDERLINE = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
MULTIPLE_CHOICE_DUMMY_INPUTS = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
if version.parse(__version__) < version.parse(min_version):
if "dev" in min_version:
error_message = (
"This example requires a source install from HuggingFace Transformers (see "
"`https://huggingface.co/transformers/installation.html#installing-from-source`),"
)
else:
error_message = f"This example requires a minimum version of {min_version},"
error_message += f" but the version found is {__version__}.\n"
raise ImportError(
error_message
+ "Check out https://huggingface.co/transformers/examples.html for the examples corresponding to other "
"versions of HuggingFace Transformers."
)
|
[
"noreply@github.com"
] |
ondewo.noreply@github.com
|
cddc26b4115c69d78efcb569dfef642b7f2d2117
|
f7f3b63d6743624529e02d2192b92db15751943f
|
/leetCode33/33_Search_in_Rotated_Sorted_Array.py
|
a12fca8b60aaa48dc57a971c34c9ff1d3a5db7e4
|
[] |
no_license
|
JhaPrashant1108/leetcode
|
0d461556a3ea3b35ba756a90f064d20fc53aff37
|
d33f8e43aed926f210ce7eed32e0f6b8011d28c7
|
refs/heads/master
| 2023-08-24T18:57:06.215997
| 2021-10-07T12:10:27
| 2021-10-07T12:10:27
| 288,906,296
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 884
|
py
|
class Solution:
def search(self, nums, target):
if not nums or len(nums) == 0:
return -1
l, r = 0, len(nums) - 1
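        # first binary search: locate the index of the smallest element (the rotation pivot)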
while(l < r):
mid = (int)(l + (r-l)/2)
if (nums[mid] > nums[r]):
l = mid+1
else:
r = mid
start = l
l, r = 0, len(nums)-1
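        # second binary search: restrict the range to the sorted half that could contain the target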
if target >= nums[start] and target <= nums[r]:
l = start
else:
r = start
while(l <= r):
mid = (int)(l + (r-l)/2)
if nums[mid] == target:
return mid
elif(nums[mid] < target):
l = mid+1
else:
r = mid - 1
return -1
L = Solution()
print(L.search(nums=[4, 5, 6, 7, 0, 1, 2], target=0))
print(L.search(nums=[4, 5, 6, 7, 0, 1, 2], target=3))
print(L.search(nums=[1], target=0))
|
[
"prashant.jha0008@gmail.com"
] |
prashant.jha0008@gmail.com
|
86305fbe815e2d06c1391d16d43539401c7146a4
|
7b09d42fdfcd2c2dbf6e3eefb3adfa99e9df794b
|
/gym_PET/envs/dimsiz.py
|
f5a7c842e5f3f0799274ce4e26b6d8a4e0af4bf3
|
[] |
no_license
|
mcbrs1a/TestPet
|
294a0bf8f62c2e808e0b7c7b956b54b13ed9e963
|
6f910da44835b560af80c15a0a5f439b140b70c9
|
refs/heads/master
| 2021-09-11T01:30:47.652102
| 2018-04-05T18:31:21
| 2018-04-05T18:31:21
| 114,370,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 580
|
py
|
#get dimensions and size of image
def dimsiz(im):
dim=len(im.shape)
sz=im.shape
if dim==2:
fx, fy = im.shape
print('two dimensions')
sz= fx, fy
return sz
else:
fx, fy, fz = im.shape
print('three dimensions')
sz= fx, fy, fz
return sz
sz=dimsiz(im)
bound=sz
vbnd = (20, 50, 30, 100)  # fxa fxb fya fyb
def boundbox2(im,bound,vbnd):
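    # Crop the image to the bounding box vbnd = (fxa, fxb, fya, fyb).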
if len(bound)==2:
imc=im[vbnd[0]:vbnd[1],vbnd[2]:vbnd[3]]
return imc
else:
imc=im[vbnd[0]:vbnd[1],vbnd[2]:vbnd[3]]
return imc
|
[
"smithR50@cardiff.cf.ac.uk"
] |
smithR50@cardiff.cf.ac.uk
|
cb71b7a1c80c4a725d89d8b324b87b7a22d6991c
|
d8c80a8c7f38ea917b3076dc82844f678cf05412
|
/setup.py
|
e367ab68bdc967db1a78ff81a4197ed1b8a0478c
|
[
"MIT"
] |
permissive
|
openlobby/openlobby-server
|
dcac830c0ffeae8d039d9d4f4324271ec1126a89
|
b7a1a2b73e903c4da57970926844b0639dce5aae
|
refs/heads/master
| 2023-04-04T01:47:06.477282
| 2018-11-11T22:07:24
| 2018-11-11T22:07:24
| 110,363,977
| 7
| 3
|
MIT
| 2021-04-20T17:40:09
| 2017-11-11T17:08:33
|
Python
|
UTF-8
|
Python
| false
| false
| 1,026
|
py
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, "README.md"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="openlobby",
version="0.1.0",
url="https://github.com/openlobby/openlobby-server",
author="Jan Bednarik",
author_email="jan.bednarik@gmail.com",
description="Open Lobby Server",
long_description=long_description,
packages=find_packages(exclude=["tests"]),
# TODO
# install_requires=[],
# extras_require={
# 'dev': [],
# 'test': [],
# },
license="MIT",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
],
)
|
[
"jan.bednarik@gmail.com"
] |
jan.bednarik@gmail.com
|
d33aa0643b569cebb4309c3a629bde092d02e7ac
|
b6ffc3b8717ebbeb96ca0d2b91cfb7de62a58b68
|
/growth rate üLCDM.py
|
d6654562c49df035e355b4294a57946daf822031
|
[] |
no_license
|
laya-laya/Dark-Energy-models
|
43968ed07a535247a32d2ef71aec5b8828484310
|
65cc0e31b28138a69407c4f433e727cdb0dc53e9
|
refs/heads/master
| 2020-04-29T03:09:24.875235
| 2020-02-01T10:53:55
| 2020-02-01T10:53:55
| 175,798,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,176
|
py
|
import numpy as np
import matplotlib.pyplot as plt
#from scipy.integrate import quad
from scipy.integrate import odeint
OM = 0.29
#gamma = 0.545
sigma8 = 0.8
z_star = 0.537
def E(z):
if z > z_star:
Ez = np.sqrt( 1-((0.75*OM)/(1+z_star))-(0.25*OM*((1+z_star)**3))+(OM * np.power(1 + z,3)))
else:
Ez = np.sqrt(1-((0.75*OM)/(1+z_star))+(((0.75*OM)/(1+z_star))*((1+z)**4)))
return Ez
def dlnEdz(z):
if z > z_star:
dlnEdz = (1.5*((1+z)**2))/(1-((0.75*OM)/(1+z_star))-(0.25*OM*((1+z_star)**3))+ (OM * np.power(1 + z,3)))
else:
dlnEdz = (1.5*(OM/(1+z_star))*((1+z)**3))/(1-((0.75*OM)/(1+z_star))+((0.75*OM)/(1+z_star))*((1+z)**4))
return dlnEdz
def f(x,t):
return ((x**2)/(1+t))-(dlnEdz(t)-(2/(1+t)))*x - (2*OM*((1+t)**2))/((E(t))**2)
time = np.linspace(0,5,1000)
z = np.linspace(0,3,1000)
z2 = odeint(f,0.56,time)
a = 0
b = 10
N = 1000
h1 = (b-a)/N
z1=1
sigma = np.zeros(N)
fsigma = np.zeros(N)
for i in range(0,N):
sigma[i] = sigma8*np.sum(np.exp(-((z2[i])/(1+i*h1))*h1))
for i in range(0,N):
fsigma[i] = z2[i]*sigma[i]
plt.plot(time,fsigma)
plt.xlim(0, 0.9)
plt.ylim(0.0, 0.8)
|
[
"noreply@github.com"
] |
laya-laya.noreply@github.com
|
1e24fdbcd189652e333de928e8269c95f940fac0
|
42310122958f2f7d65cbc1f419f9e8d44c0e829e
|
/HomePageProject/manage.py
|
d0514acf568aa2aef640efaf1a738cbb391e653d
|
[] |
no_license
|
nomanalvi/UserInterface
|
092c684649d73fece14bafe8d3c5a3146be37173
|
e3c85a4c6bb1f3af3ba6166e42d0bde77a0e1494
|
refs/heads/master
| 2022-12-02T18:09:56.424258
| 2020-08-26T13:39:07
| 2020-08-26T13:39:07
| 290,504,138
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'HomePageProject.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"maanalvi@gmail.com"
] |
maanalvi@gmail.com
|
0254a7648ea171fa9a2a5352028ad8fe70447f65
|
58da4f1a574f90870a5fd454999531d6d0047b55
|
/File_Change/detect_change.py
|
17e3747ab36c304f0468a80fe076b36fa3b29032
|
[] |
no_license
|
HarshGrandeur/CloudComputing
|
a046563e955e6a3e817a9e11e6306c0ae5dc266d
|
8d896f7f63bbf10f52c6cfdcb21f402340c9b411
|
refs/heads/master
| 2021-01-19T22:59:08.236046
| 2017-10-15T07:30:36
| 2017-10-15T07:30:36
| 88,900,164
| 0
| 0
| null | 2017-04-20T18:53:53
| 2017-04-20T18:53:53
| null |
UTF-8
|
Python
| false
| false
| 643
|
py
|
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
path = sys.argv[1] if len(sys.argv) > 1 else '.'
event_handler = LoggingEventHandler()
observer = Observer()
observer.schedule(event_handler, path, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
|
[
"hrshgpt73@gmail.com"
] |
hrshgpt73@gmail.com
|
01134ab072c1292ae5b05bafed12ab0d5e3bedfd
|
d4b1adf0fa11dace5827015a5ba7a4a212603619
|
/Backend/image_file_test/api/views.py
|
fcfddaf7ba30d167a2d6111da5ca180b9c83676e
|
[] |
no_license
|
aSafarpoor/tafsir-swe2-prj
|
0076953fd87e2a0804713623fb25f770eb9785de
|
de79658d8c64c8efd74d8b5a33eced8b7808923a
|
refs/heads/master
| 2022-01-18T19:58:33.301413
| 2019-07-22T07:55:10
| 2019-07-22T07:55:10
| 198,176,818
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,488
|
py
|
from rest_framework import generics
from course_app.models import course,section,question_exam
from image_file_test.models import image_file as imfi
from image_file_test.models import movie_link
#from serializers import CourseListSerializer
from django.core import serializers
from .serializers import CreateMember,MyImageModelSerializer,tempModelSerializer
import requests
import json
from django.http import HttpResponse,JsonResponse
import base64
from rest_framework import generics
from course_app.models import course,section,question_exam
#from serializers import CourseListSerializer
import json
from django.http import HttpResponse
class create1(generics.CreateAPIView):
queryset = imfi.objects.all()
serializer_class = CreateMember
class ListAPIview(generics.ListAPIView):
queryset = imfi.objects.all()
serializer_class = CreateMember
from rest_framework.decorators import api_view
@api_view(['GET', 'POST'])
def create0(request):
if request.method=="POST" :
serializer = MyImageModelSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
message="ok"
return HttpResponse(message)
message="nokey"
return HttpResponse(message)
else:
message="bad request"
return HttpResponse(message)
def get_movie_id(request):
if request.method=='GET':
try:
json_data=request.body
except:
message="bad urllll"
return HttpResponse(message)
'''
try :
current_user = request.user
except:
message="not logged in"
return HttpResponse(message)
if(current_user.teacher==False):
message="not teacher"
return HttpResponse(message)
'''
obj=movie_link()
obj.save()
obj_id=obj.id
dict={}
dict["id"]=obj_id
return JsonResponse(dict)
message="bad request"
return HttpResponse(message)
@api_view(['GET', 'POST'])
def call(request):
if request.method=="POST" :
import logging
import boto3
from botocore.exceptions import ClientError
def create_presigned_post(bucket_name, object_name,
fields=None, conditions=None, expiration=3600):
"""Generate a presigned URL S3 POST request to upload a file
:param bucket_name: string
:param object_name: string
:param fields: Dictionary of prefilled form fields
:param conditions: List of conditions to include in the policy
:param expiration: Time in seconds for the presigned URL to remain valid
:return: Dictionary with the following keys:
url: URL to post to
fields: Dictionary of form fields and values to submit with the POST
:return: None if error.
"""
# Generate a presigned S3 POST URL
s3_client = boto3.client('s3')
try:
response = s3_client.generate_presigned_post(bucket_name,
object_name,
Fields=fields,
Conditions=conditions,
ExpiresIn=expiration)
except ClientError as e:
logging.error(e)
return None
# The response contains the presigned URL and required fields
return response
# import requests # To install: pip install requests
'''
# Generate a presigned S3 POST URL
object_name = 'OBJECT_NAME'
response = create_presigned_post('BUCKET_NAME', object_name)
if response is None:
exit(1)
# Demonstrate how another Python program can use the presigned URL to upload a file
with open(object_name, 'rb') as f:
files = {'file': (object_name, f)}
http_response = requests.post(response['url'], data=response['fields'], files=files)
# If successful, returns HTTP status code 204
logging.info(f'File upload HTTP status code: {http_response.status_code}')
'''
'''
@api_view(['GET', 'POST'])
def call(request):
if request.method=='POST' or request.method=='GET':
# print(request.data)
try :
current_user = request.user
except:
message="not logged in"
return HttpResponse(message)
if(current_user.teacher==False):
message="not teacher"
return HttpResponse(message)
if False :
pass
else:
# msg=serializer.save()
# print("Fukkkkkkkkkkkkkkkkkkkkkkkkkk")
msg=request.data
# msg=msg[0]
print(msg)
print("\n\n")
try :
token="8061df45098379e19114ab01f4a9eb27"
address="https://www.aparat.com/etc/api/uploadform/luser/amirmansoubi828/ltoken/"+token
response = requests.get(address)
data=json.loads(response.content)
frm_id=data["uploadform"]["frm-id"]
except:
response = requests.get("https://www.aparat.com/etc/api/login/luser/amirmansoubi828/lpass/79e9feb0135e82cab14fed182ef0891b9920d641")
data=json.loads(response.content)
token=data["login"]["ltoken"]
address="https://www.aparat.com/etc/api/uploadform/luser/amirmansoubi828/ltoken/"+token
# response = requests.get(address)
# data=json.loads(response.content)
# frm_id=data["uploadform"]["frm-id"]
# msg["frm-id"]=frm_id
# print(data)
final_url="https://www.aparat.com/etc/api/uploadpost/luser/amirmansoubi828/username/amirmansoubi828/ltoken/8061df45098379e19114ab01f4a9eb27/uploadid/9331616/atrty/1562619001/avrvy/977508/key/05f962bbdb0e2fd7455bf6a712416a3b75bf54e7/"
response = requests.post(final_url, data=msg)
print("\n\n")
print(str(response.content))
print("\n\n")
message="ok"
return HttpResponse(message)
# message="nokey"
# return HttpResponse(message)
else:
message="bad request"
return HttpResponse(message)
'''
|
[
"alisafarpoor1995@gmail.com"
] |
alisafarpoor1995@gmail.com
|
3900b4a694786d1e272c019b473c940f1fb15498
|
3907f9297caec6b097c06eda0e69a38cfca9be95
|
/utilities.py
|
f564cec2083ed64a27958b6ab1063d3d8b4664ee
|
[] |
no_license
|
Abbey4799/DGen
|
493048a1e98416784e420ce9996b68454a11c53b
|
ddbe0e47080bb9ec9335e93ea8993ef410ba9cd7
|
refs/heads/main
| 2023-07-01T06:07:52.863682
| 2021-07-29T16:51:37
| 2021-07-29T16:51:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,628
|
py
|
# -*- coding: utf-8 -*-
import re
import urllib
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet
import requests
from nltk.stem import PorterStemmer
cache = {}
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
def get_concepts_of_instance_by_probase(instance, eval, use_cache=True):
"""
Fetches the concept and the probabilities for a given instance by probase.
:param instance: the instance, for which the concepts should be requested
:param use_cache: if true a cache for instances and corresponding concepts is used, to avoid unnecessary requests
:return: the concepts and their probability
"""
from urllib.parse import urlparse
if use_cache == True and instance in cache:
return cache[instance]
if eval:
probase_url = (
"https://concept.research.microsoft.com/api/Concept/ScoreByProb?instance={"
"}&topK=100&api_key=eT5luCbmII34ZvpPVs7HxtbUU1cFcE12"
)
else:
probase_url = "https://concept.research.microsoft.com/api/Concept/ScoreByProb?instance={}&topK=20&api_key=eT5luCbmII34ZvpPVs7HxtbUU1cFcE12"
try:
requestUrl = probase_url.format(urllib.request.pathname2url(instance))
except:
print("request error!")
requestUrl = probase_url.format(urllib.pathname2url(instance))
try:
response = requests.get(requestUrl, verify=False)
except requests.exceptions.ConnectionError as e:
print(e)
print("\n\ntry one last time...")
response = requests.get(requestUrl, verify=False)
if response is None:
print("microsoft api error!")
return None
concepts = response.json()
return concepts
def appendIfNotEmpty(list, item):
"""
    Append item to list in place, if item is non-empty.
:param list: the list, where the item should been appended to
:param item: the item which should been appended to the list
"""
if item:
list.append(item)
def split_text_in_words(text):
"""
Splits a given text into words
:param text: the text which should be splited into words
:return: a list containing the splitted words
"""
real_words = []
words = re.findall(
r'\'|’|"|”|“|»|«|\(|\)|\[|\]|\{|\}:;|[^\'’"”“»«\(\)\[\]\{\}\s:;]+', text
)
for word in words:
word = word.strip()
if word.startswith("..."):
real_words.append(word[:3])
appendIfNotEmpty(real_words, word[3:])
if word.startswith(('"', "(", "[", "{", "<", "«", "…", "“")):
real_words.append(word[:1])
word = word[1:]
if word.endswith("..."):
appendIfNotEmpty(real_words, word[:-3])
real_words.append(word[-3:])
elif word.endswith(
            (".", ",", ":", ";", "]", ")", "}", "!", "?", '"', ">", "»", "…", "”")
):
appendIfNotEmpty(real_words, word[:-1])
real_words.append(word[-1:])
else:
appendIfNotEmpty(real_words, word)
return real_words
def normalize_instance(s, mode=2):
"""
Normalize to a lowercase lemma string
    :param s: the string to be processed
    :param mode: 1 means return the full synonym set, 2 means return only the lemma itself
"""
try:
s = s.lower()
s = lemmatizer.lemmatize(s)
# s = stemmer.stem(s)
except:
return s
if mode == 1:
synset = set()
for syn in wordnet.synsets(s):
for l in syn.lemmas():
synset.add(l.name().replace("_", " "))
return synset
else:
return s
|
[
"rsy@163.com"
] |
rsy@163.com
|
f35c4f6e36678c5ab330fd80f885d8e6603d17da
|
91d9d33931c4ecbbe78eaaf092b601fa9c522f64
|
/novajoin/middleware/config.py
|
7eb783f39d2a70c041d537dbdc3154266d95c313
|
[
"Apache-2.0"
] |
permissive
|
mattparko/novajoin
|
8799f7d00a715c9154083489933ece629d269f48
|
e8b18c4bd44ea86cbbedfdf9a9f0f7f7718c9c56
|
refs/heads/master
| 2020-06-03T07:28:49.155058
| 2019-03-20T07:55:42
| 2019-03-21T07:58:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,739
|
py
|
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_middleware import cors
def set_config_defaults():
"""This method updates all configuration default values."""
set_cors_middleware_defaults()
def set_cors_middleware_defaults():
"""Update default configuration options for oslo.middleware."""
cors.set_defaults(
allow_headers=['Content-MD5',
'X-Image-Meta-Checksum',
'X-Storage-Token',
'Accept-Encoding',
'X-Auth-Token',
'X-Identity-Status',
'X-Roles',
'X-Service-Catalog',
'X-User-Id',
'X-Tenant-Id',
'X-OpenStack-Request-ID'],
expose_headers=['X-Image-Meta-Checksum',
'X-Auth-Token',
'X-Subject-Token',
'X-Service-Token',
'X-OpenStack-Request-ID'],
allow_methods=['GET',
'PUT',
'POST',
'DELETE',
'PATCH']
)
|
[
"alee@redhat.com"
] |
alee@redhat.com
|
471a83b33e8a7e89fc6b0398484c72c6c9845b9c
|
d34cf4ecb93e4a7d7ec95424e52f815f36206f43
|
/buildsystem/plugins.py
|
74feb5c0ab655307cb52d7fac3b173f2390f23e5
|
[] |
no_license
|
FelixKiprono/felix
|
74a2fe2b498d07a993f9ad67ff88bc916ddb271b
|
b4d0dae314d9ae7b89f7e273ca89555e91b7312f
|
refs/heads/master
| 2021-01-18T04:53:31.002406
| 2015-05-27T13:15:33
| 2015-05-27T13:15:33
| 36,385,023
| 2
| 0
| null | 2015-05-27T17:49:32
| 2015-05-27T17:49:32
| null |
UTF-8
|
Python
| false
| false
| 935
|
py
|
import fbuild
import fbuild.builders.file
import os
from fbuild.path import Path
from fbuild.builders.file import copy
# ------------------------------------------------------------------------------
def build(phase, felix):
print ("BUILDING PLUGINS")
for f in Path.glob('src/lib/plugins/*'):
copy(ctx=phase.ctx, src=f, dst=phase.ctx.buildroot / 'share'/f[4:])
plugins = [
#"ocaml2html",
#"py2html",
#"fdoc2html",
#"flx2html",
#"cpp2html",
#"fpc2html",
#"fdoc_slideshow",
#"fdoc_paragraph",
#"fdoc_heading",
#"fdoc_fileseq",
#"fdoc_scanner",
#"fdoc_button",
"toolchain_clang_osx",
"toolchain_clang_linux",
"toolchain_gcc_osx",
"toolchain_gcc_linux",
]
for base in plugins:
shlib = felix.compile(phase.ctx.buildroot/('share/lib/plugins/'+base+'.flx'),flags=['-od',phase.ctx.buildroot/'host/lib/rtl'])
|
[
"Max.Skaller@gmail.com"
] |
Max.Skaller@gmail.com
|
91758f658fe72ce35e66cc11b4da7088508a3c71
|
42fb58ce2446f4f2f11f8e3fd3bf197c514e2f34
|
/test.py
|
df8fd254bb2a0ccd5b5978022801ba36fd1589e7
|
[] |
no_license
|
EdisonFan/learnPython
|
d994c90646f0a289d02365c72d7e258e6067dfb2
|
660824db5dcd7214d880e561aca0aa794bd6f690
|
refs/heads/master
| 2020-03-07T13:58:38.170965
| 2018-03-31T08:58:58
| 2018-03-31T08:58:58
| 127,515,420
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 29 21:21:20 2018
@author: fan
"""
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
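import matplotlib.pyplot as plt

# NOTE: runplt() is assumed to be a plotting helper defined elsewhere in the original
# project; the minimal stand-in below is only a sketch so this script runs end to end.
def runplt():
    plt.figure()
    plt.xlabel('X')
    plt.ylabel('y')
    plt.grid(True)
    return plt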
X_train = [[6], [8], [10], [14], [18]]
y_train = [[7], [9], [13], [17.5], [18]]
X_test = [[6], [8], [11], [16]]
y_test = [[8], [12], [15], [18]]
# Fit a linear regression and plot using the trained model
regressor = LinearRegression()
regressor.fit(X_train, y_train)
xx = np.linspace(0, 26, 100)
yy = regressor.predict(xx.reshape(xx.shape[0], 1))
plt = runplt()
plt.plot(X_train, y_train, 'k.')
plt.plot(xx, yy)
quadratic_featurizer = PolynomialFeatures(degree=3)
X_train_quadratic = quadratic_featurizer.fit_transform(X_train)
X_test_quadratic = quadratic_featurizer.transform(X_test)
regressor_quadratic = LinearRegression()
regressor_quadratic.fit(X_train_quadratic, y_train)
xx_quadratic = quadratic_featurizer.transform(xx.reshape(xx.shape[0], 1))
plt.plot(xx, regressor_quadratic.predict(xx_quadratic), 'r-')
plt.show()
#print(X_train)
#print(X_train_quadratic)
#print(X_test)
#print(X_test_quadratic)
#print('1 r-squared', regressor.score(X_test, y_test))
#print('2 r-squared', regressor_quadratic.score(X_test_quadratic, y_test))
|
[
"fanxh@calis.edu.cn"
] |
fanxh@calis.edu.cn
|
d173feef779ae3935c6b7bfeed5894b1d4ec1e8e
|
d5f75adf5603927396bdecf3e4afae292143ddf9
|
/python/paddle/fluid/tests/unittests/test_inplace_and_clear_gradient.py
|
7ec04ed90b0aeef7599cde4e902c47111f1536bd
|
[
"Apache-2.0"
] |
permissive
|
jiweibo/Paddle
|
8faaaa1ff0beaf97ef7fb367f6c9fcc065f42fc4
|
605a2f0052e0ffb2fab3a4cf4f3bf1965aa7eb74
|
refs/heads/develop
| 2023-07-21T03:36:05.367977
| 2022-06-24T02:31:11
| 2022-06-24T02:31:11
| 196,316,126
| 3
| 2
|
Apache-2.0
| 2023-04-04T02:42:53
| 2019-07-11T03:51:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,415
|
py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.fluid as fluid
from paddle import _C_ops
import unittest
paddle.disable_static()
def clear_grad(w, a):
@paddle.no_grad()
def warp(*_):
assert w.grad is not None
_C_ops.scale_(w.grad, 'scale', 0.5)
w.clear_gradient(False)
return warp
class TestInplaceAndClearGradient(unittest.TestCase):
def test(self):
paddle.set_device('cpu')
input_data = np.ones([2, 2]).astype('float32')
w = paddle.to_tensor(input_data, 'float32', stop_gradient=False)
_clear_grad = clear_grad(w, a="1")
w._register_backward_hook(_clear_grad)
for i in range(10):
out = _C_ops.scale(w, 'scale', 0.1)
out.backward()
if __name__ == '__main__':
unittest.main()
|
[
"noreply@github.com"
] |
jiweibo.noreply@github.com
|
d5ab006541ddcbc2864ce11bba9ee259f0081089
|
31b47a204c91067a0b8e6eee77a4845ed7e76066
|
/cycle
|
9b95d5ad121a6679f10d82481addb15130f2847f
|
[] |
no_license
|
nickanderson/todo-cycle
|
d43150e9d9e79e69d702dcfd3d4e74dae3784867
|
ca4946e61474f31a5e95478735fc90d09edf3bed
|
refs/heads/master
| 2021-01-19T09:23:43.425570
| 2009-01-07T21:49:01
| 2009-01-07T21:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 11,247
|
#!/usr/bin/env python
"""
Author: Nick Anderson
Date: Dec 10 2008
cycle is a todo list manager modeled on "The Cycle" as described in
Time Management for System Administrators By Thomas A. Limoncelli
"""
import os
import subprocess
import datetime, time
import re
import sys
import fileinput
import parsedatetime.parsedatetime as pdt
import parsedatetime.parsedatetime_consts as pdc
from optparse import OptionParser, OptionGroup
__todo_dir__ = os.path.expanduser('~/.todocycle')
__editor__ = 'vim'
def pre_append(newline, filename):
"""
This will append a new line to the begining of a file
newline -- string that you want appended to the file
filename -- string of the filename that you want to prepend to
"""
    for n, line in enumerate(fileinput.FileInput(filename, inplace=1)):
        if n == 0:
            print newline
        sys.stdout.write(line)
def calendar_iterator(start, direction='next', fmt='%Y-%m-%d'):
"""
Iterates at a daily rate in the direction specified
in time from a given start date
start -- date string in specified format
fmt -- format the date string is in
"""
curr = datetime.date(*time.strptime(start, fmt)[:3])
one = datetime.timedelta(1)
while 1:
if direction == 'next':
curr += one
elif direction == 'previous':
curr -= one
else:
sys.exit('calendar_iterator recieved invalid direction')
yield curr
def find_todo_txt(date, direction, maxdays=14):
"""
Find the next todo.txt in the direction specified up to maxdays
date -- datestring in format Y-m-d
    direction -- direction in calendar to look for the todo.txt file
                 valid directions = 'next' or 'previous'
maxdays -- maximum number of days to look back for a previous todo.txt
"""
if direction == 'next':
        iter = calendar_iterator(date, direction)
date = iter.next()
return str(date) + '.txt'
elif direction == 'previous':
        iter = calendar_iterator(date, direction)
count = 0
while True:
previous_date = str(iter.next())
previous_todotxt = previous_date + '.txt'
if os.path.exists(previous_todotxt):
return previous_todotxt
elif count == maxdays:
print 'no todos found in the previous %s days' % maxdays
return None
else:
count += 1
else:
sys.exit('invalid direction passed to find_todo_txt')
def write_todo(entries, filename):
"""
Write entries to filename, return nothing
entries -- List of task entries
filename -- file to write tas entries to
"""
file = open(filename, 'w')
for entry in entries:
file.write(entry)
file.close()
def get_file_lines(filename):
"""
Return list of strings (lines in file)
filename -- file to load and read
"""
file = open(filename, 'r')
return file.readlines()
def get_unfinished_tasks(tasks):
"""
Return list of unfinished tasks
tasks -- list of tasks to process
"""
unfinished_tasks = []
postponed = re.compile('^>')
done = re.compile('^-')
for task in tasks:
if not postponed.match(task) and not done.match(task):
unfinished_tasks.append(task)
return unfinished_tasks
def get_postponed_tasks(tasks):
"""
Return list of postponed tasks
tasks -- list of tasks to process
"""
postponed_tasks = []
postponed = re.compile('>')
for task in tasks:
if postponed.match(task):
postponed_tasks.append(task)
return postponed_tasks
def get_completed_tasks(tasks):
"""
Return list of completed tasks
tasks -- list of tasks to process
"""
completed_tasks = []
completed = re.compile('^-')
for task in tasks:
if completed.match(task):
completed_tasks.append(task)
return completed_tasks
def get_forward_tasks(file):
"""
Return list of tasks that are to be postponed
file -- file to look for postponed tasks in
"""
forward_tasks = []
for line in file.readlines():
if re.match('^>', line):
forward_tasks.append(line)
return forward_tasks
def get_plain_entries(entries):
"""
Return list of entries that have the finished and postponed flags striped
entries -- list of entries to process
"""
plain_entries = []
for entry in entries:
plain_entries.append(entry.lstrip('->'))
return plain_entries
def push_forward_tasks(tasks, filename):
"""
Push current tasks defined to be forwarded to the next days todo.txt
tasks -- list of current tasks
filename -- file to push postponed tasks to
"""
if not os.path.exists(filename):
lines = []
else:
lines = get_file_lines(filename)
for task in tasks:
task = task.lstrip('>')
if task in lines:
match = True
continue
match = False
if match == False:
lines.append(task)
file = open(filename, 'w')
for line in lines:
file.write(line)
file.flush()
file.close()
def parse_date(datestring):
"""
Take given datestring and return a date in the format Y-m-d
datestring -- parsedatetime compatible datestring
"""
p = pdt.Calendar(pdc.Constants())
date = p.parse(datestring)
year = str(date[0][0])
month = str(date[0][1])
day = str(date[0][2])
if not len(month) == 2:
month = '0' + month
if not len(day) == 2:
day = '0' + day
date = '%s-%s-%s' % (year, month, day)
return date
def get_todo_files_date_range(start_date, end_date):
"""
Return list of txt files that exist for given date range
start_date -- Day to start iteration from (past to present)
end_date -- Day to end iteration
"""
date = start_date
iter = calendar_iterator(start_date, 'next')
todo_txt_dates = []
if start_date == end_date:
if os.path.exists(start_date+'.txt'):
todo_txt_dates.append(start_date+'.txt')
return todo_txt_dates
dates = []
while not date == end_date:
date = str(iter.next())
dates.append(date)
for each in dates:
if os.path.exists(each+'.txt'):
todo_txt_dates.append(each+'.txt')
return todo_txt_dates
def sumarize_todo(todo_file):
"""
    Print out a clear summary of a day's tasks
todo_file - string of file name
"""
entries = get_file_lines(todo_file)
unfinished_tasks = get_unfinished_tasks(entries)
postponed_tasks = get_postponed_tasks(entries)
completed_tasks = get_completed_tasks(entries)
if not (len(unfinished_tasks) == 0 and len(postponed_tasks) == 0 and
len(completed_tasks) == 0):
print ""
print "Summary for %s" % todo_file
print "-" * 60
if options.unfinished:
if not len(unfinished_tasks) == 0:
print ""
print "Unfinished tasks:"
for each in unfinished_tasks:
print '\t', each.rstrip()
if options.postponed:
if not len(postponed_tasks) == 0:
print ""
print "Postponed tasks:"
for each in postponed_tasks:
print '\t', each.rstrip().lstrip('>')
if options.completed:
if not len(completed_tasks) == 0:
print ""
print "Completed tasks"
for each in completed_tasks:
print '\t', each.rstrip().lstrip('-')
print "+" * 60, "\n\n"
if __name__ == '__main__':
"""
The main wrapper
"""
if not os.path.exists(__todo_dir__):
os.mkdir(__todo_dir__)
os.chdir(__todo_dir__)
today = time.strftime('%Y-%m-%d', time.localtime())
todo_today = today + '.txt'
parser = OptionParser(usage="%prog [report] [[start date]-[end date]]")
report = OptionGroup(parser, "Report filtering options",
"Only active when using the report action"
"Default behavior is to show all task status unfiltered."
"Applying a filter shows thats that match the filter"
"applied. Filter options can be applied in conjuntion.")
report.add_option("-u", "--unfinished", dest="unfinished",
action="store_true", help="Show unfinished tasks")
report.add_option("-p", "--postponed", dest="postponed",
action="store_true", help="Show postponed tasks")
report.add_option("-c", "--completed", dest="completed",
action="store_true", help="Show completed tasks")
parser.add_option_group(report)
(options, args) = parser.parse_args()
actions = ['report']
if not options.unfinished and not options.postponed and not options.completed:
options.unfinished = True
options.postponed = True
options.completed = True
if not len(args) == 0:
SpecifiedAction = re.compile(args[0])
for action in actions:
if SpecifiedAction.match('report'):
if not len(args) == 1:
daterange = " ".join(args[1:])
daterange = daterange.split('-')
if len(daterange) == 0:
startDate = parse_date('today')
endDate = parse_date('today')
if len(daterange) > 0:
startDate = parse_date(daterange[0])
if len(daterange) == 1:
endDate = startDate
if len(daterange) > 1:
endDate = parse_date(daterange[1])
else:
startDate = parse_date('today')
endDate = startDate
todo_files = get_todo_files_date_range(startDate, endDate)
for each in todo_files:
sumarize_todo(each)
sys.exit()
else:
parser.error("%s is not recognized"%args[0])
last_todo_txt = find_todo_txt(today, 'previous')
current_todo_txt = today + '.txt'
if os.path.exists(todo_today):
current_task_entries = get_file_lines(current_todo_txt)
else:
current_task_entries = []
if not last_todo_txt == None:
previous_task_entries = get_file_lines(last_todo_txt)
previous_unfinished_tasks = get_unfinished_tasks(previous_task_entries)
previous_unfinished_tasks.reverse()
for task in previous_unfinished_tasks:
if not task in get_plain_entries(current_task_entries):
current_task_entries.insert(0,task)
write_todo(current_task_entries, current_todo_txt)
editor_cmd = __editor__ + ' ' + todo_today
subprocess.call(editor_cmd, shell=True)
file = open(todo_today, 'r')
forward_tasks = get_forward_tasks(file)
file.close()
if not len(forward_tasks) == 0:
push_forward_tasks(forward_tasks, find_todo_txt(today, 'next'))
|
[
"nick@anders0n.net"
] |
nick@anders0n.net
|
|
56765239157a3bae4c7e398235b55f0cb9b4bb49
|
038d7dc760c8104f1d891f42ffbcc3b35f471e7e
|
/No1-22-April-2016/3-Rasko_Python_Intro/example_1.py
|
28907cde1102e863dd8a44f4a2176f7941c9a4dd
|
[] |
no_license
|
acmeteam/acme-talks
|
60e34f8fa62989a4307094b777de8d1fcd97c82c
|
bd0583ee46fa369b2fadc9d5f49b1722a413cc59
|
refs/heads/master
| 2020-12-24T07:15:39.540883
| 2016-09-16T14:47:03
| 2016-09-16T14:47:03
| 57,039,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 788
|
py
|
import json
import codecs
import urllib.request as req
################## checking version of local masterlist against cloud ml ##################
#(masterlist.json file needed in the working dir for this to work)
#get cloud masterlist
url = "http://www.plantronics.com/services/mobile/ios_stage/hub/3.8/masterlist.json"
resp = req.urlopen(url).read().decode("utf8")
#get local masterlist
ml_file = codecs.open("masterlist.json", "r", "utf8")
#parse local ml
ml = json.load(ml_file)
mlv = ml['masterlist_schema_version']
#parse downloaded ml
cml = json.loads(resp)
cmlv = cml['masterlist_schema_version']
#check differences (and print them)
print("\n\n\n")
if mlv == cmlv:
print("OK!")
else:
print("WRONG Version!!! - l:{} != c:{}".format(mlv, cmlv))
print("\n\n\n")
|
[
"andrija.milovanovic@pstech.rs"
] |
andrija.milovanovic@pstech.rs
|
b46305f6c2bd6c6ca9c2674b6850e30fcfd28bc7
|
f62db0b32a88adf0a9837818ffec2e27df2b84be
|
/opdracht01/MDP/Labo's Pieter/labo 2.8 part 2.py
|
e8c062a66d6c5e8a0b52045889f20ed38b60b2db
|
[] |
no_license
|
Vic-Rottiers/P2-systeembeheer
|
e4b5642f3b6bfc0acac9ce5d6a083ba69cae10c9
|
db92419916bb28f83ad28698cdf15cdc1fbae777
|
refs/heads/master
| 2023-05-08T11:07:41.687950
| 2021-05-28T12:21:51
| 2021-05-28T12:21:51
| 365,987,419
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 668
|
py
|
from ncclient import manager
import xml.dom.minidom
m = manager.connect(
host = "192.168.56.104",
port="830",
username="cisco",
password="cisco123!",
hostkey_verify=False
)
netconf_data = """
<config>
<native xmlns="http://cisco.com/ns/yang/Cisco-IOS-XE-native">
<interface>
<Loopback>
<name>111</name>
<description>TEST1</description>
<ip>
<address>
<primary>
<address>100.100.100.100</address>
<mask>255.255.255.0</mask>
</primary>
</address>
</ip>
</Loopback>
</interface>
</native>
</config>
"""
netconf_reply = m.edit_config(target="running", config=netconf_data)
print(xml.dom.minidom.parseString(netconf_reply.xml).toprettyxml())
|
[
"vic.rottiers@student.hogent.be"
] |
vic.rottiers@student.hogent.be
|
f457208a86bb5aaf7e0cc989c4142e0cfe39acd6
|
14373275670c1f3065ce9ae195df142146e2c1a4
|
/stubs/boltons/boltons/formatutils.pyi
|
20ac5f0a83b0bb32bbae800a27bde67ba4ad70b9
|
[
"Apache-2.0",
"MIT"
] |
permissive
|
sobolevn/typeshed
|
eb7af17c06a9722f23c337e6b9a4726223155d58
|
d63a82640390a9c130e0fe7d409e8b0b836b7c31
|
refs/heads/master
| 2023-08-04T05:59:29.447015
| 2023-06-14T21:27:53
| 2023-06-14T21:27:53
| 216,265,622
| 2
| 0
|
Apache-2.0
| 2022-02-08T10:40:53
| 2019-10-19T20:21:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,231
|
pyi
|
from collections.abc import Callable
from typing import Any
def construct_format_field_str(fname: str, fspec: str, conv: str) -> str: ...
def infer_positional_format_args(fstr: str) -> str: ...
def get_format_args(fstr: str) -> tuple[list[tuple[int, type]], list[tuple[str, type]]]: ...
def tokenize_format_str(fstr: str, resolve_pos: bool = True) -> list[str | BaseFormatField]: ...
class BaseFormatField:
def __init__(self, fname: str, fspec: str = "", conv: str | None = None) -> None: ...
base_name: str
fname: str
subpath: str
is_positional: bool
def set_fname(self, fname: str) -> None: ...
subfields: list[str]
fspec: str
type_char: str
type_func: str
def set_fspec(self, fspec) -> None: ...
conv: str
conv_func: str | None
def set_conv(self, conv: str) -> None: ...
@property
def fstr(self) -> str: ...
class DeferredValue:
func: Callable[..., Any]
cache_value: bool
def __init__(self, func: Callable[..., Any], cache_value: bool = True) -> None: ...
def get_value(self) -> Any: ...
def __int__(self) -> int: ...
def __float__(self) -> float: ...
def __unicode__(self) -> str: ...
def __format__(self, fmt: str) -> str: ...
|
[
"noreply@github.com"
] |
sobolevn.noreply@github.com
|
f1a9d42270997eb481ab4e158959025d989c22ed
|
0d8bf7aac9d6d922e44a4d9a98c8e2e0dff8db44
|
/Detector_yolov4_PART1.py
|
b055c75a58ba2af9f97fc5f4f546aff28f4a254f
|
[] |
no_license
|
abcom-mltutorials/Object-Detection-And-Tracking
|
c1d8df6f50ca6964678db64709a9545d168c2512
|
3a69c5d6c45ed16c000647676c6b304a2a9af180
|
refs/heads/master
| 2023-03-07T13:51:12.868851
| 2021-02-20T12:27:37
| 2021-02-20T12:27:37
| 334,087,872
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,414
|
py
|
#================================================
# Copyright @ 2020 **ABCOM Information Systems Pvt. Ltd.** All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
#================================================
# Object detection using YOLO V4
# Detector_yolov4_PART1.py
import numpy as np
import cv2
import warnings
warnings.filterwarnings('ignore')
from yolov4.tf import YOLOv4
# Initializing YOLOv4
yolo = YOLOv4()
# setting 'COCO' class names
yolo.classes = "files/coco.names"
# Creating model
yolo.make_model()
# Initializing model to pre-trained state
yolo.load_weights("files/yolov4.weights", weights_type="yolo")
inp = int(input('Choose the format for detecting objects : \n 1.Image \n 2.Video \n'))
if inp == 1: #for image
yolo.inference(media_path="data/image00.jpg")
elif inp == 2:#for video
yolo.inference(media_path="data/video00.mp4", is_image=False)
|
[
"abcom-mltutorials@gmail.com"
] |
abcom-mltutorials@gmail.com
|
1fe8f0329e0ca522ed58c0634c6952190db00db5
|
f324780d2187172bc1eabaddb23565c3dabeed31
|
/snake/__main__.py
|
b41428f3f632aa4a6eaeae1ca92cef57ad043c88
|
[] |
no_license
|
pierkoo/snake
|
92c43d0ff8e8081f8262f337d023e0039d3a3b27
|
5f1cac60cf308509c919e08e091d0e8c8ba1beb8
|
refs/heads/master
| 2023-05-11T23:47:59.075954
| 2021-06-01T21:11:08
| 2021-06-01T21:11:08
| 365,058,293
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 101
|
py
|
from game import SnakeGame
if __name__ == "__main__":
snake = SnakeGame()
snake.main_loop()
|
[
"pierkoo@gmail.com"
] |
pierkoo@gmail.com
|
4b9b4953e500a5f6dfa1db37177b48ce6b06b8ae
|
6379deba5de738bb4bace7c7f790c3717d44d4ae
|
/homework4/tests/test_calculate_average.py
|
4c81ed0bb089d8224d6f6ec7e7d7efd67c60ca5f
|
[] |
no_license
|
OSUsatoru/cs362_practice_git
|
62e23e2d6521f0e31770f943c1a257c61536716a
|
8f55934ff5eceda61f8fe00a7daca389394bfec4
|
refs/heads/master
| 2023-03-17T20:56:25.144930
| 2021-03-08T05:05:13
| 2021-03-08T05:05:13
| 331,689,331
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 477
|
py
|
import unittest
import calculate_average
class test_case_average(unittest.TestCase):
def setUp(self):
self.a = [1,2,3,4,5]
self.b = []
self.c = [2,"str1","str2"]
def test_cube(self):
self.assertEqual(3, calculate_average.cal_ave(self.a))
self.assertEqual("Empty list", calculate_average.cal_ave(self.b))
self.assertEqual("Invalid input", calculate_average.cal_ave(self.c))
if __name__ == "__main__":
unittest.main()
|
[
"yamamsat@oregonstate.edu"
] |
yamamsat@oregonstate.edu
|
338ceea0c209b5700e116c544e2b9a510486785f
|
2cd986dee35becd91cbd93ba7b35414e80e33f4f
|
/venv/lib/python3.7/encodings/cp855.py
|
4a1ca4424c182b4f9e567fcd604971544289aa4b
|
[] |
no_license
|
HernanG234/sensors_async_sim
|
95ccb4e4fd188b8fc5be206bae3af4302f23e3e2
|
fce3701e2a0865198c0977ede5f42da77b8a7853
|
refs/heads/master
| 2020-05-07T09:43:57.354059
| 2019-05-11T19:07:23
| 2019-05-11T19:07:23
| 180,387,282
| 0
| 1
| null | 2019-04-29T15:42:53
| 2019-04-09T14:37:38
|
Python
|
UTF-8
|
Python
| false
| false
| 43
|
py
|
/usr/local/lib/python3.7/encodings/cp855.py
|
[
"hernan.gonzalez@incluit.com"
] |
hernan.gonzalez@incluit.com
|