blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bfbe23ea237a6fece6340961f1e41b21a9cd1481 | 16eac80ebb68689c31bdf13c7ac903532fba315b | /weather/helpers.py | a0a489d0ed6fd58d0ac0af15d5e0622a8997e60e | [] | no_license | sthapa123/NWS-Hourly-Forecast-Analysis | f58e600b8156e7c47345e59bd4bedab19bec9781 | 4cbd7372cee14f28123404edf417b55b946b0db2 | refs/heads/master | 2022-04-08T12:06:39.268374 | 2020-03-13T19:08:36 | 2020-03-13T19:08:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 623 | py | __all__ = ["days_ago", "short_isotime", "isodate"]
from datetime import datetime, timedelta, date
def gen_analysis_key(start, end):
if not isinstance(start, date):
start = datetime.strptime(start, "%Y-%m-%d")
if not isinstance(end, date):
end = datetime.strptime(end, "%Y-%m-%d")
start = isodate(start)
end = isodate(end)
return 's{}e{}'.format(start, end)
def days_ago(days):
return datetime.utcnow() - timedelta(days=days)
def short_isotime(dt):
return dt.isoformat().split('.')[0]
def isodate(dt):
return dt.isoformat().split('T')[0]
| [
"ryanp54@yahoo.com"
] | ryanp54@yahoo.com |
919ec5668529c8b954200dd2623656a3d771a752 | 8039fd72bc4d7a7fc89ded9d63b47f020e9d95a5 | /app/recipe/tests/test_ingredients_api.py | 77c03c142401b118f4cc30bf4e11cb49be70de61 | [
"MIT"
] | permissive | vturbin/recipe-app-api | 543b5439f73299c3db12638c18df51adcc740108 | edec6371863ea7bd3aec95f6472fa66e0db5386b | refs/heads/main | 2023-01-16T06:55:29.750405 | 2020-11-22T17:06:21 | 2020-11-22T17:06:21 | 307,793,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,427 | py | from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test import TestCase
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENTS_URL = reverse('recipe:ingredient-list')
class PublicIngredientsApiTests(TestCase):
"""Test the publicly available ingredients API"""
def setUp(self):
self.client = APIClient()
def test_login_required(self):
"""Test that login is required to access the endpoint"""
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientsApiTests(TestCase):
"""Test the private ingredients can be retrieved by authorized user"""
def setUp(self):
self.client = APIClient()
self.user = get_user_model().objects.create_user(
'test@london.com',
'testpass'
)
self.client.force_authenticate(self.user)
def test_retrieve_ingredient_list(self):
"""Test retrieving a list of ingredients"""
Ingredient.objects.create(user=self.user, name="Kale")
Ingredient.objects.create(user=self.user, name="Salt")
res = self.client.get(INGREDIENTS_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_ingredients_limited_to_user(self):
"""Test that ingredients for the authenticated user are returned"""
user2 = get_user_model().objects.create_user(
'other@london.com',
'testpass'
)
Ingredient.objects.create(user=user2, name='Vinegar')
ingredient = Ingredient.objects.create(user=self.user, name='Tumeric')
res = self.client.get(INGREDIENTS_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ingredient.name)
def test_create_ingredient_successful(self):
"""Test create a new ingredient"""
payload = {'name': 'Cabbage'}
self.client.post(INGREDIENTS_URL, payload)
exists = Ingredient.objects.filter(
user=self.user,
name=payload['name'],
).exists()
self.assertTrue(exists)
def test_create_ingredient_invalid(self):
"""Test creating invalid ingredients fails"""
payload = {'name': ''}
res = self.client.post(INGREDIENTS_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipes(self):
"""Test filtering ingredients by those assigned to recipes"""
ingredient1 = Ingredient.objects.create(
user=self.user, name='Apples'
)
ingredient2 = Ingredient.objects.create(
user=self.user, name='Turkey'
)
recipe = Recipe.objects.create(
title='Apple crumble',
time_minutes=5,
price=10.00,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredient_assigned_unique(self):
"""Test filtering ingredients by assigned returns unique items"""
ingredient = Ingredient.objects.create(user=self.user, name='Eggs')
Ingredient.objects.create(user=self.user, name='Cheese')
recipe1 = Recipe.objects.create(
title='Eggs benedict',
time_minutes=30,
price=12.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='Green eggs on toast',
time_minutes=20,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENTS_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| [
"v.turbins@smart-iot.solutions"
] | v.turbins@smart-iot.solutions |
2d78ec6bc312d47fc94f57d817cd005695b414fe | 1b87d5f7cba7e068f7b2ea902bba494599d20a78 | /contrib/wydget/wydget/__init__.py | fe326d70a56e2ec6b127a655b859e95144cddc65 | [
"BSD-3-Clause"
] | permissive | jpaalasm/pyglet | 906d03fe53160885665beaed20314b5909903cc9 | bf1d1f209ca3e702fd4b6611377257f0e2767282 | refs/heads/master | 2021-01-25T03:27:08.941964 | 2014-01-25T17:50:57 | 2014-01-25T17:50:57 | 16,236,090 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 10,005 | py | '''wydget is a graphical user interface (GUI) toolkit for pyglet.
This module allows applications to create a user interface comprised of
widgets and attach event handling to those widgets.
GUIs are managed by the top-level GUI class::
from pyglet.window import Window
from wydget import GUI
window = Window(100, 100)
gui = GUI(window)
window.push_handlers(gui)
You may then add components to the GUI by importing them from the
`wydget.widgets` package::
from wydget.widgets import TextButton
b = TextButton(gui, 'Press me!')
To handle click events on the button, create a handler::
@gui.select(b)
def on_click(button, *args):
print 'I was pressed!'
Finally, use a standard pyglet event loop to have the GUI run, and invoke
``gui.draw()`` to render the GUI. The GUI will render to an area the
dimensions of the window and at z = 0::
while not window.has_exit:
window.dispatch_events()
window.clear()
gui.draw()
window.flip()
'''
import sys
import collections
from xml.etree import ElementTree
from pyglet.gl import *
from pyglet import media
import style
import event
import loadxml
import widgets
import util
class GUI(event.GUIEventDispatcher):
'''GUI oganisation and event handling.
'''
id = '-gui'
name = 'gui'
classes = ()
parent = None
def __init__(self, window, x=0, y=0, z=0, width=None, height=None):
super(GUI, self).__init__()
self.window = window
self.style = style.Style()
# element.Element stuff
self.x, self.y, self.z = x, y, z
self.width = self.inner_width = width or window.width
self.height = self.inner_height = height or window.height
self.children = []
# map Element id, class and name to Element
self._by_id = {}
self._by_class = collections.defaultdict(set)
self._by_name = collections.defaultdict(set)
# list Element.ids in the order they're registered for tabbing
self._focus_order = []
self.debug_display = None
self.debug = '--debug' in sys.argv
if self.debug:
self.debug_display = widgets.Label(self, 'dummy',
bgcolor="white", padding=1, width=self.width)
def __repr__(self):
return '<%s at (%s, %s, %s) (%sx%s)>'%(self.__class__.__name__,
self.x, self.y, self.z, self.width, self.height)
def dump(self, s=''):
print s + str(self)
for child in self.children: child.dump(s+' ')
# clipboard support
clipboard_element = None
def setSelection(self, element):
'''The element has some data that may interact with the clipboard.
'''
if self.clipboard_element not in (element, None):
self.clipboard_element.clearSelection()
self.clipboard_element = element
def clearSelection(self, element):
'''The element doesn't want to interact with the clipboard any
longer.
'''
# might already have been bumped for another
if self.clipboard_element is element:
self.clipoard_element = None
# Registration of elements
# XXX I suspect that this is duplicating functionality in layout
def register(self, element):
'''Register the element with the gui.
IDs must be unique.
'''
if element.id in self._by_id:
raise KeyError, 'ID %r already exists as %r (trying to add %r)'%(
element.id, self._by_id[element.id], element)
self._by_id[element.id] = element
self._by_name[element.name].add(element.id)
for klass in element.classes:
self._by_class[klass].add(element.id)
if element.is_focusable:
self._focus_order.append(element.id)
self.setDirty()
self._layout_needed = True
def unregister(self, element):
del self._by_id[element.id]
self._by_name[element.name].remove(element.id)
for klass in element.classes:
self._by_class[klass].remove(element.id)
if self.focused_element is element:
self.focused_element = None
if element.is_focusable:
self._focus_order.remove(element.id)
self.setDirty()
self._layout_needed = True
def has(self, spec):
if spec[0] == '#':
return spec[1:] in self._by_id
elif spec[0] == '.':
return spec[1:] in self._by_class
else:
return spec in self._by_name
def get(self, spec):
if spec[0] == '#':
return self._by_id[spec[1:]]
elif spec[0] == '.':
return (self._by_id[id] for id in self._by_class[spec[1:]])
else:
return (self._by_id[id] for id in self._by_name[spec])
# rendering / hit detection
_rects = None
def setDirty(self):
'''Indicate that one or more of the gui's children have changed
geometry and a new set of child rects is needed.
'''
self._rects = None
_layout_needed = True
def layout(self):
'''Layout the entire GUI in response to its dimensions changing or
the contents changing (in a way that would alter internal layout).
'''
#print '>'*75
#self.dump()
# resize all elements
while True:
for element in self.children:
element.resetGeometry()
ok = False
try:
while not ok:
ok = True
for element in self.children:
ok = ok and element.resize()
except util.RestartLayout:
pass
else:
break
# position top-level elements
for c in self.children:
if c.x is None or c.x_spec.percentage:
c.x = c.x_spec.calculate()
if c.y is None or c.y_spec.percentage:
c.y = c.y_spec.calculate()
self._rects = None
self._layout_needed = False
#print '-'*75
#self.dump()
#print '<'*75
def layoutNeeded(self):
self._layout_needed = True
def getRects(self, exclude=None):
'''Get the rects for all the children to draw & interact with.
Prune the tree at "exclude" if provided.
'''
if self._layout_needed:
try:
self.layout()
except:
print '*'*75
self.dump()
print '*'*75
raise
if self._rects is not None and exclude is None:
return self._rects
# now get their rects
rects = []
clip = self.rect
for element in self.children:
if element is exclude: continue
rects.extend(element.getRects(clip, exclude))
rects.sort(lambda a,b: cmp(a[1][2], b[1][2]))
if exclude is None:
self._rects = rects
return rects
def determineHit(self, x, y, exclude=None):
'''Determine which element is at the absolute (x, y) position.
"exclude" allows us to ignore a single element (eg. an element
under the cursor being dragged - we wish to know which element is
under *that)
'''
for o, (ox, oy, oz, clip) in reversed(self.getRects(exclude)):
ox += clip.x
oy += clip.y
if x < ox or y < oy: continue
if x > ox + clip.width: continue
if y > oy + clip.height: continue
return o
return None
def draw(self):
'''Render all the elements on display.'''
glPushAttrib(GL_ENABLE_BIT)
glDisable(GL_DEPTH_TEST)
# get the rects and sort by Z (yay for stable sort!)
rects = self.getRects()
# draw
oz = 0
for element, (x, y, z, c) in rects:
if element is self.debug_display:
continue
element.draw(x, y, z, c)
if self.debug:
# render the debugging displays
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glEnable(GL_BLEND)
for o, (x, y, z, c) in rects:
w, h = c.width, c.height
x += c.x
y += c.y
glColor4f(1, 0, 0, .1)
glRectf(x, y, x+w, y+h)
glColor4f(1, 1, 1, .1)
glBegin(GL_LINE_LOOP)
glVertex2f(x, y)
glVertex2f(x+w, y)
glVertex2f(x+w, y+h)
glVertex2f(x, y+h)
glEnd()
if o.view_clip:
v = o.view_clip
glColor4f(0, 0, 1, .1)
glRectf(x+v.x, y+v.y, x+v.x+v.width, y+v.y+v.height)
glDisable(GL_BLEND)
self.debug_display.draw(0, 0, 0, util.Rect(0, 0,
self.width, self.debug_display.height))
glPopAttrib()
# Element API (mostly terminators)
def getStyle(self): return self.style
def getGUI(self): return self
def isEnabled(self): return True
def isVisible(self): return True
def getParent(self, selector):
if isinstance(selector, str):
selector = [s.strip() for s in selector.split(',')]
if self.name in selector: return self
return None
def calculateAbsoluteCoords(self, x, y):
return (x + self.x, y + self.y)
def calculateRelativeCoords(self, x, y):
return (x - self.x, y - self.y)
def layoutDimensionsChanged(self, layout):
pass
padding = 0
def get_rect(self):
return util.Rect(0, 0, self.width, self.height)
rect = property(get_rect)
inner_rect = property(get_rect)
def addChild(self, child):
self.children.append(child)
self.register(child)
def delete(self):
for child in self.children: child.delete()
self.children = []
| [
"joonas.paalasmaa@gmail.com"
] | joonas.paalasmaa@gmail.com |
05e64eeb45294e4e372816d49ce3ba6960080540 | 615bbf1429b51458b4c7db3e63cf30fcf312b37d | /python/messages/command_data/__init__.py | 33ad7438162351c34ad3c1c37a0514145a213a8f | [] | no_license | shahar481/remote-mem-editor | 5d42d90718187c1d3d53d58c611c9d0f29587cd4 | e987f9a1f94879331652ed0f85c67707de015262 | refs/heads/master | 2022-12-28T19:46:11.449116 | 2020-10-08T15:27:58 | 2020-10-08T15:27:58 | 299,881,045 | 0 | 0 | null | 2020-10-08T15:28:00 | 2020-09-30T10:13:07 | Python | UTF-8 | Python | false | false | 67 | py | from messages.command_data.command_data import CommandDataMessage
| [
"sshaharse1@gmail.com"
] | sshaharse1@gmail.com |
640e9d4c72e6502ed5b447c2352db355ec85f291 | 10dca81be35594d01065f1c41b654a6ad8a711c3 | /mysite/ag53/migrations/0005_auto_20150620_0603.py | 1c501ddea341b4299d2eb76e323ca1700bd376ea | [] | no_license | kb0304/Login_signup_app | b4b9b67fc18aa8201d07cc6992b5820c91d8573b | 1d5ad299c490f14c247552ac56651aba48c0d62e | refs/heads/master | 2021-01-16T21:14:18.493988 | 2015-07-26T23:54:10 | 2015-07-26T23:54:10 | 35,742,050 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ag53', '0004_profile_user'),
]
operations = [
migrations.RemoveField(
model_name='email',
name='profile',
),
migrations.DeleteModel(
name='Email',
),
]
| [
"karan@Karans-MacBook-Air.local"
] | karan@Karans-MacBook-Air.local |
39ccfa1bfe82238f935dda6943bcfeabd47426bd | f200651e624d5e5cd2f2262359a5932216d2d443 | /demo-effects-html5-canvas/fire_controls/conv.py | 2431b8a237fa8e6630f04a1d1e18c70d1a4332e7 | [] | no_license | lwerdna/lwerdna.github.io | fbea38c62029884930ebfac70c9d455979c43fde | f80c7cb173359e13b2894d64fb735c0396278b7e | refs/heads/master | 2023-07-19T17:07:20.169897 | 2023-07-07T18:39:02 | 2023-07-07T18:39:02 | 38,472,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | palette = [
[0,0,0], [0,1,1], [0,4,5], [0,7,9], [0,8,11], [0,9,12], [15,6,8], [25,4,4],
[33,3,3], [40,2,2], [48,2,2], [55,1,1], [63,0,0], [63,0,0], [63,3,0], [63,7,0],
[63,10,0], [63,13,0], [63,16,0], [63,20,0], [63,23,0], [63,26,0], [63,29,0],
[63,33,0], [63,36,0], [63,39,0], [63,39,0], [63,40,0], [63,40,0], [63,41,0],
[63,42,0], [63,42,0], [63,43,0], [63,44,0], [63,44,0], [63,45,0], [63,45,0],
[63,46,0], [63,47,0], [63,47,0], [63,48,0], [63,49,0], [63,49,0], [63,50,0],
[63,51,0], [63,51,0], [63,52,0], [63,53,0], [63,53,0], [63,54,0], [63,55,0],
[63,55,0], [63,56,0], [63,57,0], [63,57,0], [63,58,0], [63,58,0], [63,59,0],
[63,60,0], [63,60,0], [63,61,0], [63,62,0], [63,62,0], [63,63,0],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63],
[63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63], [63,63,63]
]
for p in palette:
print '[%d,%d,%d],' % (p[0]*4, p[1]*4, p[2]*4)
| [
"andrew@vector35.com"
] | andrew@vector35.com |
994e8779646749a4cf313d90030d6d16cddf276b | bc3faef4df41d04dd080a10de084d50962861b55 | /4. Sensitivity Analysis, Appendix A/DL_FP_EC_RB.py | 0b9ed080f0eea81171c8094723db9e4a8a865cbb | [
"MIT"
] | permissive | rioarya/IDN_LC_WoodyBiomass | 3f65ab79b759b8f9e3a11872dc83857a6b15acf1 | 0042fd4333212e65735f3643ecb59971d1bd9466 | refs/heads/main | 2023-07-28T04:23:39.737572 | 2021-09-14T05:23:14 | 2021-09-14T05:23:14 | 404,382,744 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112,493 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 15:21:55 2019
@author: raryapratama
"""
#%%
#Step (1): Import Python libraries, set land conversion scenarios general parameters
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import quad
import seaborn as sns
import pandas as pd
#DL_FP_S1 Scenario
##Set parameters
#Parameters for primary forest
initAGB = 233 #source: van Beijma et al. (2018)
initAGB_min = 233-72
initAGB_max = 233 + 72
#parameters for timber plantation. Source: Khasanah et al. (2015)
tf = 201
a = 0.082
b = 2.53
#%%
#Step (2_1): C loss from the harvesting/clear cut
df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
t = range(0,tf,1)
c_firewood_energy_S1_Ac7 = df1_Ac7['Firewood_other_energy_use'].values
c_firewood_energy_S1_Ac18 = df1_Ac18['Firewood_other_energy_use'].values
c_firewood_energy_S1_Tgr40 = df1_Tgr40['Firewood_other_energy_use'].values
c_firewood_energy_S1_Tgr60 = df1_Tgr60['Firewood_other_energy_use'].values
c_firewood_energy_E_Hbr40 = dfE_Hbr40['Firewood_other_energy_use'].values
#%%
#Step (2_2): C loss from the harvesting/clear cut as wood pellets
dfE = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
c_pellets_Hbr_40y = dfE['Wood_pellets'].values
#%%
#Step (3): Aboveground biomass (AGB) decomposition
#Ac_7y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
tf = 201
t = np.arange(tf)
decomp_tot_S1_Ac_7y = df['C_remainAGB'].values
#S1_Ac_18y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
tf = 201
t = np.arange(tf)
decomp_tot_S1_Ac_18y = df['C_remainAGB'].values
#S1_Tgr_40y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
tf = 201
t = np.arange(tf)
decomp_tot_S1_Tgr_40y = df['C_remainAGB'].values
#S1_Tgr_60y
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
tf = 201
t = np.arange(tf)
decomp_tot_S1_Tgr_60y = df['C_remainAGB'].values
#E
df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
tf = 201
t = np.arange(tf)
decomp_tot_E_Hbr_40y = df['C_remainAGB'].values
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (4): Dynamic stock model of in-use wood materials
from dynamic_stock_model import DynamicStockModel
df1_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df1_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df1_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df1_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
#product lifetime
#paper
P = 4
#furniture
F = 20
#building materials
B = 35
TestDSM1_Ac7 = DynamicStockModel(t = df1_Ac7['Year'].values, i = df1_Ac7['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([P]), 'StdDev': np.array([0.3*P])})
TestDSM1_Ac18 = DynamicStockModel(t = df1_Ac18['Year'].values, i = df1_Ac18['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([F]), 'StdDev': np.array([0.3*F])})
TestDSM1_Tgr40 = DynamicStockModel(t = df1_Tgr40['Year'].values, i = df1_Tgr40['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSM1_Tgr60 = DynamicStockModel(t = df1_Tgr60['Year'].values, i = df1_Tgr60['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
TestDSME_Hbr40 = DynamicStockModel(t = dfE_Hbr40['Year'].values, i = dfE_Hbr40['Input_PF'].values, lt = {'Type': 'Normal', 'Mean': np.array([B]), 'StdDev': np.array([0.3*B])})
CheckStr1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.dimension_check()
CheckStr1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.dimension_check()
CheckStr1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.dimension_check()
CheckStr1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.dimension_check()
CheckStrE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.dimension_check()
Stock_by_cohort1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_s_c_inflow_driven()
Stock_by_cohort1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_s_c_inflow_driven()
Stock_by_cohort1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_s_c_inflow_driven()
Stock_by_cohort1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_s_c_inflow_driven()
Stock_by_cohortE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_s_c_inflow_driven()
S1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_stock_total()
S1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_stock_total()
S1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_stock_total()
S1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_stock_total()
SE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_stock_total()
O_C1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_o_c_from_s_c()
O_C1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_o_c_from_s_c()
O_C1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_o_c_from_s_c()
O_C1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_o_c_from_s_c()
O_CE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_o_c_from_s_c()
O1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_outflow_total()
O1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_outflow_total()
O1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_outflow_total()
O1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_outflow_total()
OE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_outflow_total()
DS1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.compute_stock_change()
DS1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.compute_stock_change()
DS1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.compute_stock_change()
DS1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.compute_stock_change()
DSE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.compute_stock_change()
Bal1_Ac7, ExitFlag1_Ac7 = TestDSM1_Ac7.check_stock_balance()
Bal1_Ac18, ExitFlag1_Ac18 = TestDSM1_Ac18.check_stock_balance()
Bal1_Tgr40, ExitFlag1_Tgr40 = TestDSM1_Tgr40.check_stock_balance()
Bal1_Tgr60, ExitFlag1_Tgr60 = TestDSM1_Tgr60.check_stock_balance()
BalE_Hbr40, ExitFlagE_Hbr40 = TestDSME_Hbr40.check_stock_balance()
#print output flow
print(TestDSM1_Ac7.o)
print(TestDSM1_Ac18.o)
print(TestDSM1_Tgr40.o)
print(TestDSM1_Tgr60.o)
print(TestDSME_Hbr40.o)
#%%
#Step (5): Biomass growth
## one-year gap between rotation cycle
# A. crassicarpa (Source: Anitha et al., 2015; Adiriono, 2009). Code: Ac
tf_Ac_7y = 8
tf_Ac_18y = 19
A1 = range(1,tf_Ac_7y,1)
A2 = range(1,tf_Ac_18y,1)
#calculate the biomass and carbon content of A. crassicarpa over time (7y)
def Y_Ac_7y(A1):
return 44/12*1000*np.exp(4.503-(2.559/A1))
output_Y_Ac_7y = np.array([Y_Ac_7y(A1i) for A1i in A1])
print(output_Y_Ac_7y)
#insert 0 value to the first element of the output result
output_Y_Ac_7y = np.insert(output_Y_Ac_7y,0,0)
print(output_Y_Ac_7y)
#calculate the biomass and carbon content of A. crassicarpa over time (18y)
def Y_Ac_18y(A2):
return 44/12*1000*np.exp(4.503-(2.559/A2))
output_Y_Ac_18y = np.array([Y_Ac_18y(A2i) for A2i in A2])
print(output_Y_Ac_18y)
#insert 0 value to the first element of the output result
output_Y_Ac_18y = np.insert(output_Y_Ac_18y,0,0)
print(output_Y_Ac_18y)
##26 times 8-year cycle (+1 year gap after the FP harvest)of new AGB of A. crassicarpa (7y), zero year gap between the cycle
counter_7y = range(0,26,1)
y_Ac_7y = []
for i in counter_7y:
y_Ac_7y.append(output_Y_Ac_7y)
flat_list_Ac_7y = []
for sublist in y_Ac_7y:
for item in sublist:
flat_list_Ac_7y.append(item)
#the length of the list is now 208, so we remove the last 7 elements of the list to make the len=tf
flat_list_Ac_7y = flat_list_Ac_7y[:len(flat_list_Ac_7y)-7]
print(len(flat_list_Ac_7y))
##11 times 19-year cycle (+1 year gap after the FP harvest) of new AGB of A. crassicarpa (18y), zero year gap between the cycle
counter_18y = range(0,11,1)
y_Ac_18y = []
for i in counter_18y:
y_Ac_18y.append(output_Y_Ac_18y)
flat_list_Ac_18y = []
for sublist in y_Ac_18y:
for item in sublist:
flat_list_Ac_18y.append(item)
#the length of the list is now 209, so we remove the last 8 elements of the list to make the len=tf
flat_list_Ac_18y = flat_list_Ac_18y[:len(flat_list_Ac_18y)-8]
#####Check the flat list length for Hbr
## T. grandis (Source: Anitha et al., 2015; Adiriono, 2009). Code: Tgr
tf_Tgr_40y = 41
tf_Tgr_60y = 61
T1 = range(0,tf_Tgr_40y,1)
T2 = range(0,tf_Tgr_60y,1)
#calculate the biomass and carbon content of T. grandis over time (40y)
def Y_Tgr_40y(T1):
return 44/12*1000*2.114*(T1**0.941)
output_Y_Tgr_40y = np.array([Y_Tgr_40y(T1i) for T1i in T1])
print(output_Y_Tgr_40y)
#calculate the biomass and carbon content of T. grandis over time (60y)
def Y_Tgr_60y(T2):
return 44/12*1000*2.114*(T2**0.941)
output_Y_Tgr_60y = np.array([Y_Tgr_60y(T2i) for T2i in T2])
print(output_Y_Tgr_60y)
##5 times 41-year cycle of new AGB of T. grandis (40y), zero year gap between the cycle
counter_40y = range(0,5,1)
y_Tgr_40y = []
for i in counter_40y:
y_Tgr_40y.append(output_Y_Tgr_40y)
flat_list_Tgr_40y = []
for sublist in y_Tgr_40y:
for item in sublist:
flat_list_Tgr_40y.append(item)
#the length of the list is now 205, so we remove the last 4 elements of the list to make the len=tf
flat_list_Tgr_40y = flat_list_Tgr_40y[:len(flat_list_Tgr_40y)-4]
##4 times 60-year cycle of new AGB of T. grandis (60y), zero year gap between the cycle
counter_60y = range(0,4,1)
y_Tgr_60y = []
for i in counter_60y:
y_Tgr_60y.append(output_Y_Tgr_60y)
flat_list_Tgr_60y = []
for sublist in y_Tgr_60y:
for item in sublist:
flat_list_Tgr_60y.append(item)
#the length of the list is now 244, so we remove the last 43 elements of the list to make the len=tf
flat_list_Tgr_60y = flat_list_Tgr_60y[:len(flat_list_Tgr_60y)-43]
## H. brasiliensis (Source: Guillaume et al., 2018). Code: Hbr
tf_Hbr_40y = 41
H1 = range(0,tf_Hbr_40y,1)
#calculate the biomass and carbon content of H. brasiliensis over time (40y)
def Y_Hbr_40y(H1):
return 44/12*1000*1.55*H1
output_Y_Hbr_40y = np.array([Y_Hbr_40y(H1i) for H1i in H1])
print(output_Y_Hbr_40y)
##5 times 40-year cycle of new AGB of H. brasiliensis (40y), zero year gap between the cycle
counter_40y = range(0,5,1)
y_Hbr_40y = []
for i in counter_40y:
y_Hbr_40y.append(output_Y_Hbr_40y)
flat_list_Hbr_40y = []
for sublist in y_Hbr_40y:
for item in sublist:
flat_list_Hbr_40y.append(item)
#the length of the list is now 205, so we remove the last 4 elements of the list to make the len=tf
flat_list_Hbr_40y = flat_list_Hbr_40y[:len(flat_list_Hbr_40y)-4]
#plotting
t = range (0,tf,1)
plt.xlim([0, 200])
plt.plot(t, flat_list_Ac_7y, color='lightcoral')
plt.plot(t, flat_list_Ac_18y, color='deeppink')
plt.plot(t, flat_list_Hbr_40y, color='darkviolet')
plt.plot(t, flat_list_Tgr_40y)
plt.plot(t, flat_list_Tgr_60y, color='seagreen')
#plt.fill_between(t, flat_list_nucleus, flat_list_plasma, color='darkseagreen', alpha='0.4')
plt.xlabel('Time (year)')
plt.ylabel('AGB (tC/ha)')
plt.show()
##Yearly sequestration
#Each cumulative AGB series is converted to a yearly sequestration series:
#consecutive differences give annual uptake, replanting-year drops (negative
#diffs) are zeroed, year 0 is prepended as 0 (no uptake yet), and the sign is
#flipped so that sequestration counts as a negative emission.
#(refs: https://stackoverflow.com/questions/5314241,
# https://stackoverflow.com/questions/36310897)
def _yearly_sequestration(agb_series):
    """Return the yearly-sequestration series (negative values) for a
    cumulative AGB series; the result has the same length as the input."""
    diffs = [after - before for before, after in zip(agb_series, agb_series[1:])]
    #no sequestration across the replanting years: clamp negative diffs to 0
    diffs = [0 if d < 0 else d for d in diffs]
    #prepend year 0 (no sequestration); negate so sequestration is negative
    return [-d for d in [0] + diffs]

## A. crassicarpa (7y)
flat_list_Ac_7y = _yearly_sequestration(flat_list_Ac_7y)
print(flat_list_Ac_7y)
##A. crassicarpa (18y)
flat_list_Ac_18y = _yearly_sequestration(flat_list_Ac_18y)
print(flat_list_Ac_18y)
##T. grandis (40y)
flat_list_Tgr_40y = _yearly_sequestration(flat_list_Tgr_40y)
print(flat_list_Tgr_40y)
##T. grandis (60y)
flat_list_Tgr_60y = _yearly_sequestration(flat_list_Tgr_60y)
print(flat_list_Tgr_60y)
##H. brasiliensis (40y)
flat_list_Hbr_40y = _yearly_sequestration(flat_list_Hbr_40y)
print(flat_list_Hbr_40y)
#%%
#Step (6): post-harvest processing of wood
#post-harvest wood processing: read the per-scenario emission series from the
#DL_FP workbook (one sheet per scenario/species/rotation)
_DL_FP_FILE = 'C:\\Work\\Programming\\Practice\\DL_FP.xlsx'
df1_Ac_7y = pd.read_excel(_DL_FP_FILE, 'DL_FP_S1_Ac_7y')
df1_Ac_18y = pd.read_excel(_DL_FP_FILE, 'DL_FP_S1_Ac_18y')
df1_Tgr_40y = pd.read_excel(_DL_FP_FILE, 'DL_FP_S1_Tgr_40y')
df1_Tgr_60y = pd.read_excel(_DL_FP_FILE, 'DL_FP_S1_Tgr_60y')
#backward-compatible alias: the original code named this frame with a
#lowercase-L typo ('dfl_Tgr_60y'); keep both bound so downstream code works
dfl_Tgr_60y = df1_Tgr_60y
dfE_Hbr_40y = pd.read_excel(_DL_FP_FILE, 'DL_FP_E_Hbr_40y')
t = range(0,tf,1)
#per-year post-harvest HWP processing emissions, one array per scenario
PH_Emissions_HWP1_Ac_7y = df1_Ac_7y['PH_Emissions_HWP'].values
PH_Emissions_HWP1_Ac_18y = df1_Ac_18y['PH_Emissions_HWP'].values
PH_Emissions_HWP1_Tgr_40y = df1_Tgr_40y['PH_Emissions_HWP'].values
PH_Emissions_HWP1_Tgr_60y = df1_Tgr_60y['PH_Emissions_HWP'].values
PH_Emissions_HWPE_Hbr_40y = dfE_Hbr_40y['PH_Emissions_HWP'].values
#%%
#Step (7_1): landfill gas decomposition (CH4)
#Every year's landfilled carbon decays first-order with a 20-year half-life;
#the per-year emission is the year-to-year mass loss, summed over all deposit
#years.  The original code repeated the same ~40-line pipeline five times;
#it is factored into _landfill_decomp_total below (which also fixes the
#misplaced parenthesis `len(df[...].values-1)` of the original, which only
#produced the required tf columns by accident).
hl = 20                 #half-life (years)
k = (np.log(2))/hl      #first-order decay constant
tf = 201
t = np.arange(tf)

def _landfill_decomp_total(sheet_name, gas_column):
    """Return a (tf, 1) vector of yearly landfill-decay emissions.

    Reads `gas_column` from `sheet_name` of DL_FP.xlsx; entry j of that
    column is the amount landfilled in year j, which then decays
    first-order (rate k) from year j onward.
    """
    df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', sheet_name)
    deposits = df[gas_column].values
    n = len(deposits)
    #row = calendar year, column = deposit year; each decay curve starts in
    #its deposit year
    output = np.zeros((len(t), n))
    for j, remain in enumerate(deposits):
        #(1-(1-exp(-k*t))) == exp(-k*t); kept in this form so the floats
        #match the original formulation exactly
        output[j:, j] = (1-(1-np.exp(-k*t[:len(t)-j])))*remain
    #year-to-year differences; decay only loses mass, so positive diffs are
    #numerical noise and are clipped before taking magnitudes
    subs = abs(np.diff(output, axis=0).clip(max=0))
    #prepend one zero row: no emission in year 0
    subs = np.vstack((np.zeros((1, n)), subs))
    #accumulate the deposit-year columns one at a time (preserves the
    #original float summation order)
    total = np.zeros((tf, 1))
    for j in range(n):
        total[:, 0] = total[:, 0] + subs[:, j]
    return total

decomp_tot_CH4_S1_Ac_7y = _landfill_decomp_total('DL_FP_S1_Ac_7y', 'Landfill_decomp_CH4')
print(decomp_tot_CH4_S1_Ac_7y[:,0])
decomp_tot_CH4_S1_Ac_18y = _landfill_decomp_total('DL_FP_S1_Ac_18y', 'Landfill_decomp_CH4')
print(decomp_tot_CH4_S1_Ac_18y[:,0])
decomp_tot_CH4_S1_Tgr_40y = _landfill_decomp_total('DL_FP_S1_Tgr_40y', 'Landfill_decomp_CH4')
print(decomp_tot_CH4_S1_Tgr_40y[:,0])
decomp_tot_CH4_S1_Tgr_60y = _landfill_decomp_total('DL_FP_S1_Tgr_60y', 'Landfill_decomp_CH4')
print(decomp_tot_CH4_S1_Tgr_60y[:,0])
decomp_tot_CH4_E_Hbr_40y = _landfill_decomp_total('DL_FP_E_Hbr_40y', 'Landfill_decomp_CH4')
print(decomp_tot_CH4_E_Hbr_40y[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CH4_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_CH4_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_CH4_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_CH4_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_CH4_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (7_2): landfill gas decomposition (CO2)
#Same model as Step (7_1) but for the CO2 column: first-order decay with a
#20-year half-life, yearly emission = year-to-year mass loss summed over all
#deposit years.  (The original's "#S2_Tgr_60y" header was a typo: it read the
#S1 sheet, as done explicitly here.)
hl = 20                 #half-life (years)
k = (np.log(2))/hl      #first-order decay constant
tf = 201
t = np.arange(tf)

def _landfill_decomp_total(sheet_name, gas_column):
    """Return a (tf, 1) vector of yearly landfill-decay emissions.

    Reads `gas_column` from `sheet_name` of DL_FP.xlsx; entry j of that
    column is the amount landfilled in year j, which then decays
    first-order (rate k) from year j onward.
    """
    df = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', sheet_name)
    deposits = df[gas_column].values
    n = len(deposits)
    #row = calendar year, column = deposit year; each decay curve starts in
    #its deposit year
    output = np.zeros((len(t), n))
    for j, remain in enumerate(deposits):
        #(1-(1-exp(-k*t))) == exp(-k*t); kept in this form so the floats
        #match the original formulation exactly
        output[j:, j] = (1-(1-np.exp(-k*t[:len(t)-j])))*remain
    #year-to-year differences; decay only loses mass, so positive diffs are
    #numerical noise and are clipped before taking magnitudes
    subs = abs(np.diff(output, axis=0).clip(max=0))
    #prepend one zero row: no emission in year 0
    subs = np.vstack((np.zeros((1, n)), subs))
    #accumulate the deposit-year columns one at a time (preserves the
    #original float summation order)
    total = np.zeros((tf, 1))
    for j in range(n):
        total[:, 0] = total[:, 0] + subs[:, j]
    return total

decomp_tot_CO2_S1_Ac_7y = _landfill_decomp_total('DL_FP_S1_Ac_7y', 'Landfill_decomp_CO2')
print(decomp_tot_CO2_S1_Ac_7y[:,0])
decomp_tot_CO2_S1_Ac_18y = _landfill_decomp_total('DL_FP_S1_Ac_18y', 'Landfill_decomp_CO2')
print(decomp_tot_CO2_S1_Ac_18y[:,0])
decomp_tot_CO2_S1_Tgr_40y = _landfill_decomp_total('DL_FP_S1_Tgr_40y', 'Landfill_decomp_CO2')
print(decomp_tot_CO2_S1_Tgr_40y[:,0])
decomp_tot_CO2_S1_Tgr_60y = _landfill_decomp_total('DL_FP_S1_Tgr_60y', 'Landfill_decomp_CO2')
print(decomp_tot_CO2_S1_Tgr_60y[:,0])
decomp_tot_CO2_E_Hbr_40y = _landfill_decomp_total('DL_FP_E_Hbr_40y', 'Landfill_decomp_CO2')
print(decomp_tot_CO2_E_Hbr_40y[:,0])
#plotting
t = np.arange(0,tf)
plt.plot(t,decomp_tot_CO2_S1_Ac_7y,label='Ac_7y')
plt.plot(t,decomp_tot_CO2_S1_Ac_18y,label='Ac_18y')
plt.plot(t,decomp_tot_CO2_S1_Tgr_40y,label='Tgr_40y')
plt.plot(t,decomp_tot_CO2_S1_Tgr_60y,label='Tgr_60y')
plt.plot(t,decomp_tot_CO2_E_Hbr_40y,label='E_Hbr_40y')
plt.xlim(0,200)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.show()
#%%
#Step (8): Sum the emissions and sequestration (net carbon balance), CO2 and CH4 are separated
#https://stackoverflow.com/questions/52703442/python-sum-values-from-multiple-lists-more-than-two
#C_loss + C_remainAGB + C_remainHWP + PH_Emissions_PO
#Each Emissions_* list collects the per-year CO2 flux components of one
#scenario: firewood combustion, on-site AGB decomposition (decomp_tot_S1_* is
#computed before this chunk -- verify upstream), product outflow (TestDSM*.o
#-- presumably a dynamic stock model output; confirm against its definition),
#post-harvest HWP processing, and landfill CO2 decay.
Emissions_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y, TestDSM1_Ac7.o, PH_Emissions_HWP1_Ac_7y, decomp_tot_CO2_S1_Ac_7y[:,0]]
Emissions_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y, TestDSM1_Ac18.o, PH_Emissions_HWP1_Ac_18y, decomp_tot_CO2_S1_Ac_18y[:,0]]
Emissions_S1_Tgr_40y = [c_firewood_energy_S1_Tgr40, decomp_tot_S1_Tgr_40y, TestDSM1_Tgr40.o, PH_Emissions_HWP1_Tgr_40y, decomp_tot_CO2_S1_Tgr_40y[:,0]]
Emissions_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y, TestDSM1_Tgr60.o, PH_Emissions_HWP1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y[:,0]]
#the E (H. brasiliensis) scenario carries one extra component: pellet carbon
Emissions_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y, TestDSME_Hbr40.o, PH_Emissions_HWPE_Hbr_40y, decomp_tot_CO2_E_Hbr_40y[:,0]]
#element-wise sum of the components -> one net yearly CO2 series per scenario
Emissions_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Emissions_S1_Ac_7y)]
Emissions_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Emissions_S1_Ac_18y)]
Emissions_DL_FP_S1_Tgr_40y = [sum(x) for x in zip(*Emissions_S1_Tgr_40y)]
Emissions_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Emissions_S1_Tgr_60y)]
Emissions_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Emissions_E_Hbr_40y)]
#CH4 is kept separate from CO2: take the single column of each landfill-CH4
#decomposition total computed in Step (7_1)
#CH4_S1_Ac_7y
Emissions_CH4_DL_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y[:,0]
#CH4_S1_Ac_18y
Emissions_CH4_DL_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y[:,0]
#CH4_S1_Tgr_40y
Emissions_CH4_DL_FP_S1_Tgr_40y = decomp_tot_CH4_S1_Tgr_40y[:,0]
#CH4_S1_Tgr_60y
Emissions_CH4_DL_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y[:,0]
#CH4_E_Hbr_40y
Emissions_CH4_DL_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y[:,0]
#%%
#Step (9): Generate the excel file (emissions_seq_scenarios.xlsx) from Step (8) calculation
import itertools  #kept at this point in the script for any later cells that use it

#year column: 0 .. tf-1
year = list(range(0, tf))
print(year)
#CH4 emission column: all-zeros placeholder (the real CH4 series go into the
#per-scenario 'kg_CH4' columns below; this list is not used further here)
Emissions_CH4 = [0] * tf
print(Emissions_CH4)
#emission reference column: a unit pulse at year 0 (reference emission for
#the dynGWP calculation), zero everywhere else
Emission_ref = [0] * tf
print(Emission_ref)
Emission_ref[0] = 1
print(Emission_ref)
Col1 = year
Col2_S1_Ac_7y = Emissions_DL_FP_S1_Ac_7y
Col2_S1_Ac_18y = Emissions_DL_FP_S1_Ac_18y
Col2_S1_Tgr_40y = Emissions_DL_FP_S1_Tgr_40y
Col2_S1_Tgr_60y = Emissions_DL_FP_S1_Tgr_60y
Col2_E_Hbr_40y = Emissions_DL_FP_E_Hbr_40y
Col3_S1_Ac_7y = Emissions_CH4_DL_FP_S1_Ac_7y
Col3_S1_Ac_18y = Emissions_CH4_DL_FP_S1_Ac_18y
Col3_S1_Tgr_40y = Emissions_CH4_DL_FP_S1_Tgr_40y
Col3_S1_Tgr_60y = Emissions_CH4_DL_FP_S1_Tgr_60y
Col3_E_Hbr_40y = Emissions_CH4_DL_FP_E_Hbr_40y
Col4 = Emission_ref
Col5 = flat_list_Ac_7y
Col6 = flat_list_Ac_18y
Col7 = flat_list_Tgr_40y
Col8 = flat_list_Tgr_60y
Col9 = flat_list_Hbr_40y
#A. crassicarpa -- NOTE: these rebind df1_Ac_7y/df1_Ac_18y (previously the
#raw spreadsheet frames from Step 6) to the output tables
df1_Ac_7y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Ac_7y,'kg_CH4':Col3_S1_Ac_7y,'kg_CO2_seq':Col5,'emission_ref':Col4})
df1_Ac_18y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Ac_18y,'kg_CH4':Col3_S1_Ac_18y,'kg_CO2_seq':Col6,'emission_ref':Col4})
#T. grandis
df1_Tgr_40y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Tgr_40y,'kg_CH4':Col3_S1_Tgr_40y,'kg_CO2_seq':Col7,'emission_ref':Col4})
df1_Tgr_60y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_S1_Tgr_60y,'kg_CH4':Col3_S1_Tgr_60y,'kg_CO2_seq':Col8,'emission_ref':Col4})
#H. brasiliensis
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Col1,'kg_CO2':Col2_E_Hbr_40y,'kg_CH4':Col3_E_Hbr_40y,'kg_CO2_seq':Col9,'emission_ref':Col4})
#write one sheet per scenario into the output workbook
writer = pd.ExcelWriter('emissions_seq_DL_FP_EC_RB.xlsx', engine = 'xlsxwriter')
df1_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_S1_Ac_7y', header=True, index=False )
df1_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_S1_Ac_18y', header=True, index=False)
df1_Tgr_40y.to_excel(writer, sheet_name = 'DL_FP_S1_Tgr_40y', header=True, index=False)
df1_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_S1_Tgr_60y', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_Hbr_40y', header=True, index=False)
#close() persists the workbook; the original's extra writer.save() call was
#redundant (and ExcelWriter.save() is removed in modern pandas)
writer.close()
#%%
## DYNAMIC LCA
# Step (10): Set General Parameters for Dynamic LCA calculation
# Radiative-forcing constants and Bern carbon-cycle parameters.
aCH4 = 0.129957e-12    # methane - instantaneous radiative forcing per unit mass [W/m2 /kgCH4]
TauCH4 = 12            # methane - lifetime (years)
aCO2 = 0.0018088e-12   # CO2 - instantaneous radiative forcing per unit mass [W/m2 /kgCO2]
TauCO2 = [172.9, 18.51, 1.186]   # CO2 pool time constants (Bern carbon cycle-climate model)
aBern = [0.259, 0.338, 0.186]    # CO2 pool weights (Bern carbon cycle-climate model)
a0Bern = 0.217                   # permanent airborne fraction (Bern carbon cycle-climate model)
# Horizon is 202 so that DCF(t-i) up to DCF(201) exists for emissions in year
# 200 (there is no DCF(0)).
tf = 202
#%%
#Step (11): Bern 2.5 CC Model, determine atmospheric load (C(t)) for GHG (CO2 and CH4)
t = range(tf)

## CO2: multi-exponential decay of an emission pulse
def C_CO2(t):
    """Airborne fraction of a CO2 pulse t years after emission (Bern model)."""
    remaining = a0Bern
    for weight, tau in zip(aBern, TauCO2):
        remaining = remaining + weight * np.exp(-t / tau)
    return remaining

output_CO2 = np.array(list(map(C_CO2, t)))
print(output_CO2)
## CH4: single-exponential atmospheric decay
def C_CH4(t):
    """Airborne fraction of a CH4 pulse t years after emission."""
    decay_rate = -t / TauCH4
    return np.exp(decay_rate)

output_CH4 = np.array(list(map(C_CH4, t)))

# Overlay the CO2 and CH4 decay curves over the 200-year window.
plt.xlim([0, 200])
plt.ylim([0,1.1])
plt.plot(t, output_CO2, output_CH4)
plt.xlabel('Time (year)')
plt.ylabel('Fraction of CO$_2$')
plt.show()
output_CH4.size  # no-op outside an interactive session
#%%
#determine the C(t) for CO2
# Integrate the decay curve over each one-year interval [i-1, i].
# NOTE(review): for i = 0 this integrates over [-1, 0]; presumably intentional
# given the "no DCF(0)" convention stated at Step (10) -- confirm against the
# dynamic-LCA method reference.
s = []
t = np.arange(0,tf,1)
for i in t:
    s.append(quad(C_CO2,i-1,i))
# scipy.integrate.quad returns (value, abserr); keep only the integral value.
res_list_CO2 = [x[0] for x in s]
len(res_list_CO2)
#%%
#determine the C(t) for CH4
s = []
for i in t:
    s.append(quad(C_CH4,i-1,i))
res_list_CH4 = [p[0] for p in s]
#plot
plt.xlim([0, 200])
plt.ylim([0,1.5])
plt.plot(t, res_list_CO2, res_list_CH4)
plt.show()
#%%
#Step (12): Determine dynamic characterization factors (DCF) for CO2 and CH4
# DCF_inst[k] = radiative efficiency * integral of the airborne fraction over
# year k; units W/m2 per kg of gas emitted.
DCF_inst_CO2 = aCO2 * np.array(res_list_CO2)
print(DCF_inst_CO2)
DCF_inst_CH4 = aCH4 * np.array(res_list_CH4)
plt.xlim([0, 200])
plt.ylim([0,4e-15])
plt.plot(t, DCF_inst_CO2, DCF_inst_CH4)
plt.xlabel('Time (year)')
plt.ylabel('DCF_inst (10$^{-15}$ W/m$^2$.kg CO$_2$)')
plt.show()
len(DCF_inst_CO2)
#%%
#Step (13): import emission data from emissions_seq_scenarios.xlsx (Step (9))
# Read back the workbook written in Step (9), one sheet per scenario.
##wood-based
#read S1_Ac_7y
df = pd.read_excel('emissions_seq_DL_FP_EC_RB.xlsx', 'DL_FP_S1_Ac_7y') # can also index sheet by name or fetch all sheets
emission_CO2_S1_Ac_7y = df['kg_CO2'].tolist()
emission_CH4_S1_Ac_7y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Ac_7y = df['kg_CO2_seq'].tolist()
# Reference column is identical on every sheet; read once here.
emission_CO2_ref = df['emission_ref'].tolist()
#read S1_Ac_18y
df = pd.read_excel('emissions_seq_DL_FP_EC_RB.xlsx', 'DL_FP_S1_Ac_18y')
emission_CO2_S1_Ac_18y = df['kg_CO2'].tolist()
emission_CH4_S1_Ac_18y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Ac_18y = df['kg_CO2_seq'].tolist()
#read S1_Tgr_40y
df = pd.read_excel('emissions_seq_DL_FP_EC_RB.xlsx', 'DL_FP_S1_Tgr_40y') # can also index sheet by name or fetch all sheets
emission_CO2_S1_Tgr_40y = df['kg_CO2'].tolist()
emission_CH4_S1_Tgr_40y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Tgr_40y = df['kg_CO2_seq'].tolist()
#read S1_Tgr_60y
df = pd.read_excel('emissions_seq_DL_FP_EC_RB.xlsx', 'DL_FP_S1_Tgr_60y')
emission_CO2_S1_Tgr_60y = df['kg_CO2'].tolist()
emission_CH4_S1_Tgr_60y = df['kg_CH4'].tolist()
emission_CO2_seq_S1_Tgr_60y = df['kg_CO2_seq'].tolist()
#read E_Hbr_40y
df = pd.read_excel('emissions_seq_DL_FP_EC_RB.xlsx', 'DL_FP_E_Hbr_40y') # can also index sheet by name or fetch all sheets
emission_CO2_E_Hbr_40y = df['kg_CO2'].tolist()
emission_CH4_E_Hbr_40y = df['kg_CH4'].tolist()
emission_CO2_seq_E_Hbr_40y = df['kg_CO2_seq'].tolist()
#%%
#Step (14): import emission data from the counter-use of non-renewable materials/energy scenarios (NR)
# NonRW_DL_FP.xlsx is an externally prepared workbook (not generated by this
# script); each sheet provides fossil-counterpart emissions plus sequestration.
#read S1_Ac_7y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
emissions_NonRW_S1_Ac_7y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Ac_7y_seq = df['kg_CO2_seq'].tolist()
# Overwrites the reference column read in Step (13); the two are expected to
# be identical (1-kg pulse at year 0).
emission_CO2_ref = df['emission_ref'].tolist()
#read S1_Ac_18y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
emissions_NonRW_S1_Ac_18y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Ac_18y_seq = df['kg_CO2_seq'].tolist()
#read S1_Tgr_40y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Tgr_40y') # can also index sheet by name or fetch all sheets
emissions_NonRW_S1_Tgr_40y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Tgr_40y_seq = df['kg_CO2_seq'].tolist()
#read S1_Tgr_60y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
emissions_NonRW_S1_Tgr_60y = df['NonRW_emissions'].tolist()
emissions_NonRW_S1_Tgr_60y_seq = df['kg_CO2_seq'].tolist()
#read E_Hbr_40y
df = pd.read_excel('NonRW_DL_FP.xlsx', 'DL_FP_E_Hbr_40y') # can also index sheet by name or fetch all sheets
emissions_NonRW_E_Hbr_40y = df['NonRW_emissions'].tolist()
emissions_NonRW_E_Hbr_40y_seq = df['kg_CO2_seq'].tolist()
#%%
#Step (15): Determine the time elapsed dynamic characterization factors, DCF(t-ti), for CO2 and CH4

def _time_elapsed_dcf(dcf_inst):
    # Lower-triangular (tf-1, tf-1) matrix: column t holds DCF(t-i) for an
    # emission in year i, i.e. entry [i, t] = dcf_inst[t - i + 1] for i <= t;
    # everything above the diagonal stays zero. Index-equivalent to the
    # original while-loop construction.
    elapsed = np.zeros((tf - 1, tf - 1))
    for col in range(tf - 1):
        for row in range(col + 1):
            elapsed[row, col] = dcf_inst[col - row + 1]
    return elapsed

#DCF(t-i) CO2
DCF_CO2_ti = _time_elapsed_dcf(DCF_inst_CO2)
print(DCF_CO2_ti)
#sns.heatmap(DCF_CO2_ti)
DCF_CO2_ti.shape

#DCF(t-i) CH4
DCF_CH4_ti = _time_elapsed_dcf(DCF_inst_CH4)
print(DCF_CH4_ti)
#sns.heatmap(DCF_CH4_ti)
DCF_CH4_ti.shape
#%%
# Step (16): Calculate instantaneous global warming impact (GWI)
# For each year t: GWI_inst[t, col] = sum over emission years i of
# emission[i] * DCF(t-i), i.e. a dot product of the emission column with
# column t of the elapsed-DCF matrix. Columns: 0 = CO2, 1 = CH4,
# 2 = sequestration (negative CO2). The total is the row sum.
# NOTE(review): the emission lists read from Excel must have length tf-1
# (= 201) to match DCF_*_ti[:, t]; confirm the workbook row count.
##Wood-based
#S1_Ac_7y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Ac_7y = (tf-1,3)
GWI_inst_S1_Ac_7y = np.zeros(matrix_GWI_S1_Ac_7y)
for t in range(0,tf-1):
    GWI_inst_S1_Ac_7y[t,0] = np.sum(np.multiply(emission_CO2_S1_Ac_7y,DCF_CO2_ti[:,t]))
    GWI_inst_S1_Ac_7y[t,1] = np.sum(np.multiply(emission_CH4_S1_Ac_7y,DCF_CH4_ti[:,t]))
    GWI_inst_S1_Ac_7y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Ac_7y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Ac_7y = (tf-1,1)
GWI_inst_tot_S1_Ac_7y = np.zeros(matrix_GWI_tot_S1_Ac_7y)
GWI_inst_tot_S1_Ac_7y[:,0] = np.array(GWI_inst_S1_Ac_7y[:,0] + GWI_inst_S1_Ac_7y[:,1] + GWI_inst_S1_Ac_7y[:,2])
print(GWI_inst_tot_S1_Ac_7y[:,0])
t = np.arange(0,tf-1,1)
#S1_Ac_18y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Ac_18y = (tf-1,3)
GWI_inst_S1_Ac_18y = np.zeros(matrix_GWI_S1_Ac_18y)
for t in range(0,tf-1):
    GWI_inst_S1_Ac_18y[t,0] = np.sum(np.multiply(emission_CO2_S1_Ac_18y,DCF_CO2_ti[:,t]))
    GWI_inst_S1_Ac_18y[t,1] = np.sum(np.multiply(emission_CH4_S1_Ac_18y,DCF_CH4_ti[:,t]))
    GWI_inst_S1_Ac_18y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Ac_18y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Ac_18y = (tf-1,1)
GWI_inst_tot_S1_Ac_18y = np.zeros(matrix_GWI_tot_S1_Ac_18y)
GWI_inst_tot_S1_Ac_18y[:,0] = np.array(GWI_inst_S1_Ac_18y[:,0] + GWI_inst_S1_Ac_18y[:,1] + GWI_inst_S1_Ac_18y[:,2])
print(GWI_inst_tot_S1_Ac_18y[:,0])
#S1_Tgr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Tgr_40y = (tf-1,3)
GWI_inst_S1_Tgr_40y = np.zeros(matrix_GWI_S1_Tgr_40y)
for t in range(0,tf-1):
    GWI_inst_S1_Tgr_40y[t,0] = np.sum(np.multiply(emission_CO2_S1_Tgr_40y,DCF_CO2_ti[:,t]))
    GWI_inst_S1_Tgr_40y[t,1] = np.sum(np.multiply(emission_CH4_S1_Tgr_40y,DCF_CH4_ti[:,t]))
    GWI_inst_S1_Tgr_40y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Tgr_40y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Tgr_40y = (tf-1,1)
GWI_inst_tot_S1_Tgr_40y = np.zeros(matrix_GWI_tot_S1_Tgr_40y)
GWI_inst_tot_S1_Tgr_40y[:,0] = np.array(GWI_inst_S1_Tgr_40y[:,0] + GWI_inst_S1_Tgr_40y[:,1] + GWI_inst_S1_Tgr_40y[:,2])
print(GWI_inst_tot_S1_Tgr_40y[:,0])
#S1_Tgr_60y
t = np.arange(0,tf-1,1)
matrix_GWI_S1_Tgr_60y = (tf-1,3)
GWI_inst_S1_Tgr_60y = np.zeros(matrix_GWI_S1_Tgr_60y)
for t in range(0,tf-1):
    GWI_inst_S1_Tgr_60y[t,0] = np.sum(np.multiply(emission_CO2_S1_Tgr_60y,DCF_CO2_ti[:,t]))
    GWI_inst_S1_Tgr_60y[t,1] = np.sum(np.multiply(emission_CH4_S1_Tgr_60y,DCF_CH4_ti[:,t]))
    GWI_inst_S1_Tgr_60y[t,2] = np.sum(np.multiply(emission_CO2_seq_S1_Tgr_60y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_S1_Tgr_60y = (tf-1,1)
GWI_inst_tot_S1_Tgr_60y = np.zeros(matrix_GWI_tot_S1_Tgr_60y)
GWI_inst_tot_S1_Tgr_60y[:,0] = np.array(GWI_inst_S1_Tgr_60y[:,0] + GWI_inst_S1_Tgr_60y[:,1] + GWI_inst_S1_Tgr_60y[:,2])
print(GWI_inst_tot_S1_Tgr_60y[:,0])
#E_Hbr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_E_Hbr_40y = (tf-1,3)
GWI_inst_E_Hbr_40y = np.zeros(matrix_GWI_E_Hbr_40y)
for t in range(0,tf-1):
    GWI_inst_E_Hbr_40y[t,0] = np.sum(np.multiply(emission_CO2_E_Hbr_40y,DCF_CO2_ti[:,t]))
    GWI_inst_E_Hbr_40y[t,1] = np.sum(np.multiply(emission_CH4_E_Hbr_40y,DCF_CH4_ti[:,t]))
    GWI_inst_E_Hbr_40y[t,2] = np.sum(np.multiply(emission_CO2_seq_E_Hbr_40y,DCF_CO2_ti[:,t]))
matrix_GWI_tot_E_Hbr_40y = (tf-1,1)
GWI_inst_tot_E_Hbr_40y = np.zeros(matrix_GWI_tot_E_Hbr_40y)
GWI_inst_tot_E_Hbr_40y[:,0] = np.array(GWI_inst_E_Hbr_40y[:,0] + GWI_inst_E_Hbr_40y[:,1] + GWI_inst_E_Hbr_40y[:,2])
print(GWI_inst_tot_E_Hbr_40y[:,0])
##NonRW
# Same convolution as the wood-based stanzas above, but NonRW scenarios have
# only two CO2 columns: 0 = fossil counterpart emissions, 1 = sequestration.
#S1_Ac_7y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Ac_7y = (tf-1,2)
GWI_inst_NonRW_S1_Ac_7y = np.zeros(matrix_GWI_NonRW_S1_Ac_7y)
for t in range(0,tf-1):
    GWI_inst_NonRW_S1_Ac_7y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Ac_7y,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S1_Ac_7y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Ac_7y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Ac_7y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Ac_7y = np.zeros(matrix_GWI_tot_NonRW_S1_Ac_7y)
GWI_inst_tot_NonRW_S1_Ac_7y[:,0] = np.array(GWI_inst_NonRW_S1_Ac_7y[:,0] + GWI_inst_NonRW_S1_Ac_7y[:,1])
print(GWI_inst_tot_NonRW_S1_Ac_7y[:,0])
#S1_Ac_18y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Ac_18y = (tf-1,2)
GWI_inst_NonRW_S1_Ac_18y = np.zeros(matrix_GWI_NonRW_S1_Ac_18y)
for t in range(0,tf-1):
    GWI_inst_NonRW_S1_Ac_18y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Ac_18y,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S1_Ac_18y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Ac_18y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Ac_18y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Ac_18y = np.zeros(matrix_GWI_tot_NonRW_S1_Ac_18y)
GWI_inst_tot_NonRW_S1_Ac_18y[:,0] = np.array(GWI_inst_NonRW_S1_Ac_18y[:,0] + GWI_inst_NonRW_S1_Ac_18y[:,1])
print(GWI_inst_tot_NonRW_S1_Ac_18y[:,0])
#S1_Tgr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Tgr_40y = (tf-1,2)
GWI_inst_NonRW_S1_Tgr_40y = np.zeros(matrix_GWI_NonRW_S1_Tgr_40y)
for t in range(0,tf-1):
    GWI_inst_NonRW_S1_Tgr_40y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_40y,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S1_Tgr_40y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_40y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Tgr_40y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Tgr_40y = np.zeros(matrix_GWI_tot_NonRW_S1_Tgr_40y)
GWI_inst_tot_NonRW_S1_Tgr_40y[:,0] = np.array(GWI_inst_NonRW_S1_Tgr_40y[:,0] + GWI_inst_NonRW_S1_Tgr_40y[:,1])
print(GWI_inst_tot_NonRW_S1_Tgr_40y[:,0])
#S1_Tgr_60y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_S1_Tgr_60y = (tf-1,2)
GWI_inst_NonRW_S1_Tgr_60y = np.zeros(matrix_GWI_NonRW_S1_Tgr_60y)
for t in range(0,tf-1):
    GWI_inst_NonRW_S1_Tgr_60y[t,0] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_60y,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_S1_Tgr_60y[t,1] = np.sum(np.multiply(emissions_NonRW_S1_Tgr_60y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_S1_Tgr_60y = (tf-1,1)
GWI_inst_tot_NonRW_S1_Tgr_60y = np.zeros(matrix_GWI_tot_NonRW_S1_Tgr_60y)
GWI_inst_tot_NonRW_S1_Tgr_60y[:,0] = np.array(GWI_inst_NonRW_S1_Tgr_60y[:,0] + GWI_inst_NonRW_S1_Tgr_60y[:,1])
print(GWI_inst_tot_NonRW_S1_Tgr_60y[:,0])
#E_Hbr_40y
t = np.arange(0,tf-1,1)
matrix_GWI_NonRW_E_Hbr_40y = (tf-1,2)
GWI_inst_NonRW_E_Hbr_40y = np.zeros(matrix_GWI_NonRW_E_Hbr_40y)
for t in range(0,tf-1):
    GWI_inst_NonRW_E_Hbr_40y[t,0] = np.sum(np.multiply(emissions_NonRW_E_Hbr_40y,DCF_CO2_ti[:,t]))
    GWI_inst_NonRW_E_Hbr_40y[t,1] = np.sum(np.multiply(emissions_NonRW_E_Hbr_40y_seq,DCF_CO2_ti[:,t]))
matrix_GWI_tot_NonRW_E_Hbr_40y = (tf-1,1)
GWI_inst_tot_NonRW_E_Hbr_40y = np.zeros(matrix_GWI_tot_NonRW_E_Hbr_40y)
GWI_inst_tot_NonRW_E_Hbr_40y[:,0] = np.array(GWI_inst_NonRW_E_Hbr_40y[:,0] + GWI_inst_NonRW_E_Hbr_40y[:,1])
print(GWI_inst_tot_NonRW_E_Hbr_40y[:,0])
t = np.arange(0,tf-1,1)
#create zero list to highlight the horizontal line for 0
def zerolistmaker(n):
    """Return a list of n zeros (used to draw the y = 0 reference line)."""
    return [0 for _ in range(n)]
#convert to flat list
# Each GWI_inst_tot_* is a (tf-1, 1) column vector; flatten() yields the
# equivalent 1-D series (same values, same order) for plotting. The Tgr_40y
# series are intentionally excluded here; their plot lines below are
# commented out as well.
GWI_inst_tot_NonRW_S1_Ac_7y = GWI_inst_tot_NonRW_S1_Ac_7y.flatten()
GWI_inst_tot_NonRW_S1_Ac_18y = GWI_inst_tot_NonRW_S1_Ac_18y.flatten()
GWI_inst_tot_NonRW_S1_Tgr_60y = GWI_inst_tot_NonRW_S1_Tgr_60y.flatten()
GWI_inst_tot_NonRW_E_Hbr_40y = GWI_inst_tot_NonRW_E_Hbr_40y.flatten()
GWI_inst_tot_S1_Ac_7y = GWI_inst_tot_S1_Ac_7y.flatten()
GWI_inst_tot_S1_Ac_18y = GWI_inst_tot_S1_Ac_18y.flatten()
GWI_inst_tot_S1_Tgr_60y = GWI_inst_tot_S1_Tgr_60y.flatten()
GWI_inst_tot_E_Hbr_40y = GWI_inst_tot_E_Hbr_40y.flatten()
# Dashed lines = non-renewable counterpart (NR_*), solid = wood-based.
plt.plot(t, GWI_inst_tot_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#plt.plot(t, GWI_inst_tot_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_inst_tot_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
plt.plot(t, GWI_inst_tot_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#plt.plot(t, GWI_inst_tot_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
plt.plot(t, GWI_inst_tot_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
plt.plot(t, GWI_inst_tot_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWI_inst_tot_NonRW_E_Hbr_40y, GWI_inst_tot_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
#plt.fill_between(t, GWI_inst_tot_NonRW_S1_Ac_7y, GWI_inst_tot_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
plt.grid(True)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
plt.xlim(0,200)
plt.ylim(-1e-9,1.4e-9)
plt.title('Instantaneous GWI, DL_FP_EC')
plt.xlabel('Time (year)')
#plt.ylabel('GWI_inst (10$^{-12}$ W/m$^2$)')
plt.ylabel('GWI_inst (W/m$^2$)')
# Raw string: the Windows path contains sequences like '\W' and '\G' that are
# invalid escapes in a normal string literal (DeprecationWarning, a future
# SyntaxError); the raw literal is byte-identical at runtime.
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_inst_NonRW_DL_FP_S1', dpi=300)
plt.show()
#%%
#Step (17): Calculate cumulative global warming impact (GWI)
# Running total of the instantaneous GWI per scenario.
# NOTE: the Tgr_40y inputs were not flattened above; np.cumsum without an
# axis flattens its input, so these results are 1-D as well.
##Wood-based
GWI_cum_S1_Ac_7y = np.cumsum(GWI_inst_tot_S1_Ac_7y)
GWI_cum_S1_Ac_18y = np.cumsum(GWI_inst_tot_S1_Ac_18y)
GWI_cum_S1_Tgr_40y = np.cumsum(GWI_inst_tot_S1_Tgr_40y)
GWI_cum_S1_Tgr_60y = np.cumsum(GWI_inst_tot_S1_Tgr_60y)
GWI_cum_E_Hbr_40y = np.cumsum(GWI_inst_tot_E_Hbr_40y)
##NonRW
GWI_cum_NonRW_S1_Ac_7y = np.cumsum(GWI_inst_tot_NonRW_S1_Ac_7y)
GWI_cum_NonRW_S1_Ac_18y = np.cumsum(GWI_inst_tot_NonRW_S1_Ac_18y)
GWI_cum_NonRW_S1_Tgr_40y = np.cumsum(GWI_inst_tot_NonRW_S1_Tgr_40y)
GWI_cum_NonRW_S1_Tgr_60y = np.cumsum(GWI_inst_tot_NonRW_S1_Tgr_60y)
GWI_cum_NonRW_E_Hbr_40y = np.cumsum(GWI_inst_tot_NonRW_E_Hbr_40y)
#print(GWI_cum_NonRW_S1_Ac_18y)
plt.xlabel('Time (year)')
#plt.ylabel('GWI_cum (10$^{-10}$ W/m$^2$)')
plt.ylabel('GWI_cum (W/m$^2$)')
plt.xlim(0,200)
plt.ylim(-1e-7,1.5e-7)
plt.title('Cumulative GWI, DL_FP_EC')
plt.plot(t, GWI_cum_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#plt.plot(t, GWI_cum_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)
plt.plot(t, GWI_cum_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
plt.plot(t, GWI_cum_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#plt.plot(t, GWI_cum_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
plt.plot(t, GWI_cum_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
plt.plot(t, GWI_cum_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')
plt.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
plt.grid(True)
#plt.fill_between(t, GWI_cum_NonRW_S1_Tgr_60y, GWI_cum_NonRW_S1_Ac_7y, color='lightcoral', alpha=0.3)
plt.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
# Raw string avoids invalid escape sequences in the Windows path (same bytes
# at runtime as the previous plain literal).
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWI_cum_NonRW_DL_FP_EC', dpi=300)
plt.show()
#%%
#Step (18): Determine the Instantenous and Cumulative GWI for the emission reference (1 kg CO2 emission at time zero) before performing dynamic GWP calculation
# emission_CO2_ref is 1 at year 0 and 0 elsewhere, so this reduces to the DCF
# of a unit CO2 pulse; its cumulative sum is the GWP denominator in Step (19).
t = np.arange(0,tf-1,1)
matrix_GWI_ref = (tf-1,1)
GWI_inst_ref = np.zeros(matrix_GWI_ref)
for t in range(0,tf-1):
    GWI_inst_ref[t,0] = np.sum(np.multiply(emission_CO2_ref,DCF_CO2_ti[:,t]))
#print(GWI_inst_ref[:,0])
len(GWI_inst_ref)
#determine the GWI cumulative for the emission reference
t = np.arange(0,tf-1,1)
GWI_cum_ref = np.cumsum(GWI_inst_ref[:,0])
#print(GWI_cum_ref)
plt.xlabel('Time (year)')
plt.ylabel('GWI_cum_ref (10$^{-13}$ W/m$^2$.kgCO$_2$)')
plt.plot(t, GWI_cum_ref)
len(GWI_cum_ref)
#%%
#Step (19): Calculate dynamic global warming potential (GWPdyn)
# GWPdyn(t) = cumulative GWI of the scenario / cumulative GWI of the 1-kg CO2
# reference pulse. The factor 1000 in the denominator rescales the result;
# it presumably converts kg to tonnes, consistent with the t-CO2-eq axis
# label below -- confirm against the method documentation.
##Wood-based
GWP_dyn_cum_S1_Ac_7y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Ac_7y, GWI_cum_ref)]
GWP_dyn_cum_S1_Ac_18y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Ac_18y, GWI_cum_ref)]
GWP_dyn_cum_S1_Tgr_40y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Tgr_40y, GWI_cum_ref)]
GWP_dyn_cum_S1_Tgr_60y = [x/(y*1000) for x,y in zip(GWI_cum_S1_Tgr_60y, GWI_cum_ref)]
GWP_dyn_cum_E_Hbr_40y = [x/(y*1000) for x,y in zip(GWI_cum_E_Hbr_40y, GWI_cum_ref)]
##NonRW
GWP_dyn_cum_NonRW_S1_Ac_7y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Ac_7y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Ac_18y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Ac_18y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Tgr_40y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Tgr_40y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_S1_Tgr_60y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_S1_Tgr_60y, GWI_cum_ref)]
GWP_dyn_cum_NonRW_E_Hbr_40y = [x/(y*1000) for x,y in zip(GWI_cum_NonRW_E_Hbr_40y, GWI_cum_ref)]
#print(GWP_dyn_cum_NonRW_S1_Ac_18y)
fig=plt.figure()
fig.show()
ax=fig.add_subplot(111)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Ac_7y, color='olive', label='NR_M_EC_Ac_7y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Ac_18y, color='forestgreen', label='NR_M_EC_Ac_18y', ls='--', alpha=0.55)
#ax.plot(t, GWP_dyn_cum_NonRW_S1_Tgr_40y, color='lightcoral', label='NR_M_EC_Tgr_40y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='deeppink', label='NR_M_EC_Tgr_60y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_NonRW_E_Hbr_40y, color='royalblue', label='NR_E_EC_Hbr_40y', ls='--', alpha=0.55)
ax.plot(t, GWP_dyn_cum_S1_Ac_7y, color='olive', label='M_EC_Ac_7y')
ax.plot(t, GWP_dyn_cum_S1_Ac_18y, color='forestgreen', label='M_EC_Ac_18y')
#ax.plot(t, GWP_dyn_cum_S1_Tgr_40y, color='lightcoral', label='M_EC_Tgr_40y')
ax.plot(t, GWP_dyn_cum_S1_Tgr_60y, color='deeppink', label='M_EC_Tgr_60y')
ax.plot(t, GWP_dyn_cum_E_Hbr_40y, color='royalblue', label='E_EC_Hbr_40y')
ax.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
#plt.fill_between(t, GWP_dyn_cum_NonRW_S1_Ac_7y, GWP_dyn_cum_NonRW_S1_Tgr_60y, color='lightcoral', alpha=0.3)
plt.grid(True)
ax.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax.set_xlim(0,200)
ax.set_ylim(-750,1000)
#ax.set_ylim(-600,1500)
ax.set_xlabel('Time (year)')
ax.set_ylabel('GWP$_{dyn}$ (t-CO$_2$-eq)')
ax.set_title('Dynamic GWP, DL_FP_EC')
# Raw string avoids invalid escape sequences in the Windows path (same bytes
# at runtime as the previous plain literal).
plt.savefig(r'C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_cum_NonRW_DL_FP_S1', dpi=300)
plt.draw()
#%%
#Step (20): Exporting the data behind result graphs to Excel
# Collect the 201-year series plotted above (the Tgr_40y variants are again
# omitted) and write one sheet each for instantaneous GWI, cumulative GWI,
# and dynamic GWP.
year = []
for x in range (0, 201):
    year.append(x)
### Create Column
Col1 = year
##GWI_Inst
#GWI_inst from wood-based scenarios
Col_GI_1 = GWI_inst_tot_S1_Ac_7y
Col_GI_2 = GWI_inst_tot_S1_Ac_18y
Col_GI_3 = GWI_inst_tot_S1_Tgr_60y
Col_GI_4 = GWI_inst_tot_E_Hbr_40y
#print(Col_GI_1)
#print(np.shape(Col_GI_1))
#GWI_inst from counter use scenarios
Col_GI_5 = GWI_inst_tot_NonRW_S1_Ac_7y
Col_GI_6 = GWI_inst_tot_NonRW_S1_Ac_18y
Col_GI_7 = GWI_inst_tot_NonRW_S1_Tgr_60y
Col_GI_8 = GWI_inst_tot_NonRW_E_Hbr_40y
#print(Col_GI_7)
#print(np.shape(Col_GI_7))
#create column results
##GWI_cumulative
#GWI_cumulative from wood-based scenarios
Col_GC_1 = GWI_cum_S1_Ac_7y
Col_GC_2 = GWI_cum_S1_Ac_18y
Col_GC_3 = GWI_cum_S1_Tgr_60y
Col_GC_4 = GWI_cum_E_Hbr_40y
#GWI_cumulative from counter use scenarios
Col_GC_5 = GWI_cum_NonRW_S1_Ac_7y
Col_GC_6 = GWI_cum_NonRW_S1_Ac_18y
Col_GC_7 = GWI_cum_NonRW_S1_Tgr_60y
Col_GC_8 = GWI_cum_NonRW_E_Hbr_40y
#create column results
##GWPdyn
#GWPdyn from wood-based scenarios
Col_GWP_1 = GWP_dyn_cum_S1_Ac_7y
Col_GWP_2 = GWP_dyn_cum_S1_Ac_18y
Col_GWP_3 = GWP_dyn_cum_S1_Tgr_60y
Col_GWP_4 = GWP_dyn_cum_E_Hbr_40y
#GWPdyn from counter use scenarios
Col_GWP_5 = GWP_dyn_cum_NonRW_S1_Ac_7y
Col_GWP_6 = GWP_dyn_cum_NonRW_S1_Ac_18y
Col_GWP_7 = GWP_dyn_cum_NonRW_S1_Tgr_60y
Col_GWP_8 = GWP_dyn_cum_NonRW_E_Hbr_40y
#Create colum results
dfM_EC_GI = pd.DataFrame.from_dict({'Year':Col1,'M_EC_Ac_7y (W/m2)':Col_GI_1, 'M_EC_Ac_18y (W/m2)':Col_GI_2,
                                    'M_EC_Tgr_60y (W/m2)':Col_GI_3, 'E_EC_Hbr_40y (W/m2)':Col_GI_4,
                                    'NR_M_EC_Ac_7y (W/m2)':Col_GI_5, 'NR_M_EC_Ac_18y (W/m2)':Col_GI_6,
                                    'NR_M_EC_Tgr_60y (W/m2)':Col_GI_7, 'NR_E_EC_Hbr_40y (W/m2)':Col_GI_8})
dfM_EC_GC = pd.DataFrame.from_dict({'Year':Col1,'M_EC_Ac_7y (W/m2)':Col_GC_1, 'M_EC_Ac_18y (W/m2)':Col_GC_2,
                                    'M_EC_Tgr_60y (W/m2)':Col_GC_3, 'E_EC_Hbr_40y (W/m2)':Col_GC_4,
                                    'NR_M_EC_Ac_7y (W/m2)':Col_GC_5, 'NR_M_EC_Ac_18y (W/m2)':Col_GC_6,
                                    'NR_M_EC_Tgr_60y (W/m2)':Col_GC_7, 'NR_E_EC_Hbr_40y (W/m2)':Col_GC_8})
dfM_EC_GWPdyn = pd.DataFrame.from_dict({'Year':Col1,'M_EC_Ac_7y (t-CO2-eq)':Col_GWP_1, 'M_EC_Ac_18y (t-CO2-eq)':Col_GWP_2,
                                        'M_EC_Tgr_60y (t-CO2-eq)':Col_GWP_3, 'E_EC_Hbr_40y (t-CO2-eq)':Col_GWP_4,
                                        'NR_M_EC_Ac_7y (t-CO2-eq)':Col_GWP_5, 'NR_M_EC_Ac_18y (t-CO2-eq)':Col_GWP_6,
                                        'NR_M_EC_Tgr_60y (t-CO2-eq)':Col_GWP_7, 'NR_E_EC_Hbr_40y (t-CO2-eq)':Col_GWP_8})
#Export to excel
writer = pd.ExcelWriter('GraphResults_DL_FP_EC_RB.xlsx', engine = 'xlsxwriter')
dfM_EC_GI.to_excel(writer, sheet_name = 'GWI_Inst_DL_FP_EC', header=True, index=False )
dfM_EC_GC.to_excel(writer, sheet_name = 'Cumulative GWI_DL_FP_EC', header=True, index=False )
dfM_EC_GWPdyn.to_excel(writer, sheet_name = 'GWPdyn_DL_FP_EC', header=True, index=False )
# NOTE(review): save() followed by close() double-finalizes on newer pandas;
# confirm the pinned pandas version before upgrading.
writer.save()
writer.close()
#%%
#Step (21): Generate the excel file for the individual carbon emission and sequestration flows
#print year column
year = []
for x in range (0, 201):
    year.append(x)
print (year)
# Unit conversion factors: dividing a kg-CO2 series by 1000*44/12 yields
# tonnes of carbon (44/12 is presumably the CO2:C mass ratio -- consistent
# with the t-C column labels below); likewise 16/12 for CH4:C.
division = 1000*44/12
division_CH4 = 1000*16/12
# WARNING: the rescaling below mutates the flow lists in place -- re-running
# this cell divides the values a second time.
#M_Ac_7y
c_firewood_energy_S1_Ac7 = [x/division for x in c_firewood_energy_S1_Ac7]
decomp_tot_S1_Ac_7y = [x/division for x in decomp_tot_S1_Ac_7y]
TestDSM1_Ac7.o = [x/division for x in TestDSM1_Ac7.o]
PH_Emissions_HWP1_Ac_7y = [x/division for x in PH_Emissions_HWP1_Ac_7y]
#OC_storage_S1_Ac7 = [x/division for x in OC_storage_S1_Ac7]
flat_list_Ac_7y = [x/division for x in flat_list_Ac_7y]
decomp_tot_CO2_S1_Ac_7y[:,0] = [x/division for x in decomp_tot_CO2_S1_Ac_7y[:,0]]
decomp_tot_CH4_S1_Ac_7y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Ac_7y[:,0]]
#M_Ac_18y
c_firewood_energy_S1_Ac18 = [x/division for x in c_firewood_energy_S1_Ac18]
decomp_tot_S1_Ac_18y = [x/division for x in decomp_tot_S1_Ac_18y]
TestDSM1_Ac18.o = [x/division for x in TestDSM1_Ac18.o]
PH_Emissions_HWP1_Ac_18y = [x/division for x in PH_Emissions_HWP1_Ac_18y]
#OC_storage_S1_Ac18 = [x/division for x in OC_storage_S1_Ac18]
flat_list_Ac_18y = [x/division for x in flat_list_Ac_18y]
decomp_tot_CO2_S1_Ac_18y[:,0] = [x/division for x in decomp_tot_CO2_S1_Ac_18y[:,0]]
decomp_tot_CH4_S1_Ac_18y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Ac_18y[:,0]]
#M_Tgr_60y
c_firewood_energy_S1_Tgr60 = [x/division for x in c_firewood_energy_S1_Tgr60]
decomp_tot_S1_Tgr_60y = [x/division for x in decomp_tot_S1_Tgr_60y]
TestDSM1_Tgr60.o = [x/division for x in TestDSM1_Tgr60.o]
PH_Emissions_HWP1_Tgr_60y = [x/division for x in PH_Emissions_HWP1_Tgr_60y]
#OC_storage_S1_Tgr60 = [x/division for x in OC_storage_S1_Tgr60]
flat_list_Tgr_60y = [x/division for x in flat_list_Tgr_60y]
decomp_tot_CO2_S1_Tgr_60y[:,0] = [x/division for x in decomp_tot_CO2_S1_Tgr_60y[:,0]]
decomp_tot_CH4_S1_Tgr_60y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_S1_Tgr_60y[:,0]]
#E_Hbr_40y
c_firewood_energy_E_Hbr40 = [x/division for x in c_firewood_energy_E_Hbr40]
c_pellets_Hbr_40y = [x/division for x in c_pellets_Hbr_40y]
decomp_tot_E_Hbr_40y = [x/division for x in decomp_tot_E_Hbr_40y]
TestDSME_Hbr40.o = [x/division for x in TestDSME_Hbr40.o]
PH_Emissions_HWPE_Hbr_40y = [x/division for x in PH_Emissions_HWPE_Hbr_40y]
#OC_storage_E_Hbr40 = [x/division for x in OC_storage_E_Hbr40]
flat_list_Hbr_40y = [x/division for x in flat_list_Hbr_40y]
decomp_tot_CO2_E_Hbr_40y[:,0] = [x/division for x in decomp_tot_CO2_E_Hbr_40y[:,0]]
decomp_tot_CH4_E_Hbr_40y[:,0] = [x/division_CH4 for x in decomp_tot_CH4_E_Hbr_40y[:,0]]
#landfill aggregate flows
# Each assignment builds a 2-tuple (CH4-derived C, CO2-derived C); the
# zip-sum adds the two per year, and the final comprehension flattens the
# resulting one-element arrays into a plain list of floats.
Landfill_decomp_DL_FP_S1_Ac_7y = decomp_tot_CH4_S1_Ac_7y, decomp_tot_CO2_S1_Ac_7y
Landfill_decomp_DL_FP_S1_Ac_18y = decomp_tot_CH4_S1_Ac_18y, decomp_tot_CO2_S1_Ac_18y
Landfill_decomp_DL_FP_S1_Tgr_60y = decomp_tot_CH4_S1_Tgr_60y, decomp_tot_CO2_S1_Tgr_60y
Landfill_decomp_DL_FP_E_Hbr_40y = decomp_tot_CH4_E_Hbr_40y, decomp_tot_CO2_E_Hbr_40y
Landfill_decomp_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_S1_Ac_7y)]
Landfill_decomp_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_S1_Ac_18y)]
Landfill_decomp_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_S1_Tgr_60y)]
Landfill_decomp_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Landfill_decomp_DL_FP_E_Hbr_40y)]
Landfill_decomp_DL_FP_S1_Ac_7y = [item for sublist in Landfill_decomp_DL_FP_S1_Ac_7y for item in sublist]
Landfill_decomp_DL_FP_S1_Ac_18y = [item for sublist in Landfill_decomp_DL_FP_S1_Ac_18y for item in sublist]
Landfill_decomp_DL_FP_S1_Tgr_60y = [item for sublist in Landfill_decomp_DL_FP_S1_Tgr_60y for item in sublist]
Landfill_decomp_DL_FP_E_Hbr_40y = [item for sublist in Landfill_decomp_DL_FP_E_Hbr_40y for item in sublist]
# Column aliases for the C-flow export (Step 21): one group per scenario.
#M_Ac_7y
Column1 = year
Column2 = c_firewood_energy_S1_Ac7
Column3 = decomp_tot_S1_Ac_7y
Column4 = TestDSM1_Ac7.o
Column5 = PH_Emissions_HWP1_Ac_7y
#Column6_1 = OC_storage_S1_Ac7
Column6 = Landfill_decomp_DL_FP_S1_Ac_7y
Column7 = flat_list_Ac_7y
#M_Ac_18y
Column8 = c_firewood_energy_S1_Ac18
Column9 = decomp_tot_S1_Ac_18y
Column10 = TestDSM1_Ac18.o
Column11 = PH_Emissions_HWP1_Ac_18y
#Column12_1 = OC_storage_S1_Ac18
Column12 = Landfill_decomp_DL_FP_S1_Ac_18y
Column13 = flat_list_Ac_18y
#M_Tgr_60y
Column14 = c_firewood_energy_S1_Tgr60
Column15 = decomp_tot_S1_Tgr_60y
Column16 = TestDSM1_Tgr60.o
Column17 = PH_Emissions_HWP1_Tgr_60y
#Column18_1 = OC_storage_S1_Tgr60
Column18 = Landfill_decomp_DL_FP_S1_Tgr_60y
Column19 = flat_list_Tgr_60y
#E_Hbr_40y
Column20 = c_firewood_energy_E_Hbr40
# Pellets flow exists only for the E_Hbr_40y scenario.
Column20_1 = c_pellets_Hbr_40y
Column21 = decomp_tot_E_Hbr_40y
Column22 = TestDSME_Hbr40.o
Column23 = PH_Emissions_HWPE_Hbr_40y
#Column24_1 = OC_storage_E_Hbr40
Column24 = Landfill_decomp_DL_FP_E_Hbr_40y
Column25 = flat_list_Hbr_40y
#create columns
# One DataFrame per scenario; column names follow the F*-* carbon-flow
# labelling used in the plots below. NOTE(review): dfE_Tgr_60y carries an
# 'E' prefix but holds the M_Tgr_60y scenario (see its sheet name).
dfM_Ac_7y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column7,
                                #    '9: Landfill storage (t-C)':Column6_1,
                                    'F1-0: Residue decomposition (t-C)':Column3,
                                    'F6-0-1: Emissions from firewood/other energy use (t-C)':Column2,
                                    'F8-0: Operational stage/processing emissions (t-C)':Column5,
                                    'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column4,
                                    'F7-0: Landfill gas decomposition (t-C)':Column6})
dfM_Ac_18y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column13,
                                #     '9: Landfill storage (t-C)':Column12_1,
                                     'F1-0: Residue decomposition (t-C)':Column9,
                                     'F6-0-1: Emissions from firewood/other energy use (t-C)':Column8,
                                     'F8-0: Operational stage/processing emissions (t-C)':Column11,
                                     'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column10,
                                     'F7-0: Landfill gas decomposition (t-C)':Column12})
dfE_Tgr_60y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column19,
                                 #     '9: Landfill storage (t-C)':Column18_1,
                                      'F1-0: Residue decomposition (t-C)':Column15,
                                      'F6-0-1: Emissions from firewood/other energy use (t-C)':Column14,
                                      'F8-0: Operational stage/processing emissions (t-C)':Column17,
                                      'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column16,
                                      'F7-0: Landfill gas decomposition (t-C)':Column18})
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Column1,'F0-1: Biomass C sequestration (t-C)':Column25,
                                 #     '9: Landfill storage (t-C)':Column24_1,
                                      'F1-0: Residue decomposition (t-C)':Column21,
                                      'F6-0-1: Emissions from firewood/other energy use (t-C)':Column20,
                                      'F8-0: Operational stage/processing emissions (t-C)':Column23,
                                      'F6-0-2: Energy use emissions from in-use stocks outflow (t-C)':Column22,
                                      'F7-0: Landfill gas decomposition (t-C)':Column24,
                                      'F4-0: Emissions from wood pellets use (t-C)':Column20_1})
writer = pd.ExcelWriter('C_flows_DL_FP_EC_RB.xlsx', engine = 'xlsxwriter')
dfM_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_M_Ac_7y (EC)', header=True, index=False)
dfM_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_M_Ac_18y (EC)', header=True, index=False)
dfE_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_M_Tgr_60y (EC)', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_Hbr_40y (EC)', header=True, index=False)
# NOTE(review): save() then close() double-finalizes on newer pandas.
writer.save()
writer.close()
#%%
#Step (22): Plot of the individual carbon emission and sequestration flows for normal and symlog-scale graphs
#DL_FP_M_EC_Ac_7y (Existing conversion efficiency)
figure = plt.figure()
figure.show()
axes = figure.add_subplot(111)

# One curve per carbon flow; symlog y-scale keeps both small and large
# flows visible on the same axes.
for series, shade, flow_label in (
        (flat_list_Ac_7y, 'darkkhaki', 'F0-1: Biomass C sequestration'),
        (decomp_tot_S1_Ac_7y, 'lightcoral', 'F1-0: Residue decomposition'),
        (c_firewood_energy_S1_Ac7, 'mediumseagreen', 'F6-0-1: Emissions from firewood/other energy use'),
        (PH_Emissions_HWP1_Ac_7y, 'orange', 'F8-0: Operational stage/processing emissions'),
        (TestDSM1_Ac7.o, 'royalblue', 'F6-0-2: Energy use emissions from in-use stocks outflow'),
        (Landfill_decomp_DL_FP_S1_Ac_7y, 'yellow', 'F7-0: Landfill gas decomposition')):
    axes.plot(t, series, color=shade, label=flow_label)

axes.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
axes.set_xlim(-1,200)
axes.set_yscale('symlog')
axes.set_xlabel('Time (year)')
axes.set_ylabel('C flows (t-C) (symlog)')
axes.set_title('Carbon flow, DL_FP_M_EC_Ac_7y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_M_EC_Ac_7y (Existing conversion efficiency)
fig=plt.figure()
fig.show()
ax1=fig.add_subplot(111)
ax1.plot(t, flat_list_Ac_7y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax1.plot(t, OC_storage_S1_Ac7, color='darkturquoise', label='9: Landfill storage')
ax1.plot(t, decomp_tot_S1_Ac_7y, color='lightcoral', label='F1-0: Residue decomposition')
ax1.plot(t, c_firewood_energy_S1_Ac7, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax1.plot(t, PH_Emissions_HWP1_Ac_7y, color='orange', label='F8-0: Operational stage/processing emissions')
ax1.plot(t, TestDSM1_Ac7.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax1.plot(t, Landfill_decomp_DL_FP_S1_Ac_7y, color='yellow', label='F7-0: Landfill gas decomposition')
ax1.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax1.set_xlim(0,200)
ax1.set_xlabel('Time (year)')
ax1.set_ylabel('C flows(t-C)')
ax1.set_title('Carbon flow, DL_FP_M_Ac_7y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#DL_FP_M_EC_Ac_18y (Existing conversion efficiency)
# Same plot pair as the Ac_7y cell, for the 18-year Acacia rotation scenario.
fig=plt.figure()
fig.show()
ax2_s=fig.add_subplot(111)
#plot
ax2_s.plot(t, flat_list_Ac_18y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2_s.plot(t, OC_storage_S1_Ac18, color='darkturquoise', label='9: Landfill storage')
ax2_s.plot(t, decomp_tot_S1_Ac_18y, color='lightcoral', label='F1-0: Residue decomposition')
ax2_s.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2_s.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange', label='F8-0: Operational stage/processing emissions')
ax2_s.plot(t, TestDSM1_Ac18.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2_s.plot(t, Landfill_decomp_DL_FP_S1_Ac_18y, color='yellow', label='F7-0: Landfill gas decomposition')
ax2_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2_s.set_xlim(-1,200)
ax2_s.set_yscale('symlog')
ax2_s.set_xlabel('Time (year)')
ax2_s.set_ylabel('C flows (t-C) (symlog)')
ax2_s.set_title('Carbon flow, DL_FP_M_EC_Ac_18y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_M_EC_Ac_18y (Existing conversion efficiency)
# Linear-scale variant.
fig=plt.figure()
fig.show()
ax2=fig.add_subplot(111)
#plot
ax2.plot(t, flat_list_Ac_18y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax2.plot(t, OC_storage_S1_Ac18, color='darkturquoise', label='9: Landfill storage')
ax2.plot(t, decomp_tot_S1_Ac_18y, color='lightcoral', label='F1-0: Residue decomposition')
ax2.plot(t, c_firewood_energy_S1_Ac18, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax2.plot(t, PH_Emissions_HWP1_Ac_18y, color='orange', label='F8-0: Operational stage/processing emissions')
ax2.plot(t, TestDSM1_Ac18.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax2.plot(t, Landfill_decomp_DL_FP_S1_Ac_18y, color='yellow', label='F7-0: Landfill gas decomposition')
ax2.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax2.set_xlim(0,200)
ax2.set_xlabel('Time (year)')
ax2.set_ylabel('C flows(t-C)')
ax2.set_title('Carbon flow, DL_FP_M_Ac_18y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#DL_FP_M_EC_Tgr_60y (Existing conversion efficiency)
# Same plot pair for the 60-year Tectona grandis rotation scenario.
fig=plt.figure()
fig.show()
ax3_s=fig.add_subplot(111)
#plot
ax3_s.plot(t, flat_list_Tgr_60y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3_s.plot(t, OC_storage_S1_Tgr60, color='darkturquoise', label='9: Landfill storage')
ax3_s.plot(t, decomp_tot_S1_Tgr_60y, color='lightcoral', label='F1-0: Residue decomposition')
ax3_s.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3_s.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange', label='F8-0: Operational stage/processing emissions')
ax3_s.plot(t, TestDSM1_Tgr60.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax3_s.plot(t, Landfill_decomp_DL_FP_S1_Tgr_60y, color='yellow', label='F7-0: Landfill gas decomposition')
ax3_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3_s.set_xlim(-1,200)
ax3_s.set_yscale('symlog')
ax3_s.set_xlabel('Time (year)')
ax3_s.set_ylabel('C flows (t-C) (symlog)')
ax3_s.set_title('Carbon flow, DL_FP_M_EC_Tgr_60y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_M_EC_Tgr_60y (Existing conversion efficiency)
# Linear-scale variant.
fig=plt.figure()
fig.show()
ax3=fig.add_subplot(111)
#plot
ax3.plot(t, flat_list_Tgr_60y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax3.plot(t, OC_storage_S1_Tgr60, color='darkturquoise', label='9: Landfill storage')
ax3.plot(t, decomp_tot_S1_Tgr_60y, color='lightcoral', label='F1-0: Residue decomposition')
ax3.plot(t, c_firewood_energy_S1_Tgr60, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax3.plot(t, PH_Emissions_HWP1_Tgr_60y, color='orange', label='F8-0: Operational stage/processing emissions')
ax3.plot(t, TestDSM1_Tgr60.o, color='royalblue', label='F6-0-2: Energy use emissions from in-use stocks outflow')
ax3.plot(t, Landfill_decomp_DL_FP_S1_Tgr_60y, color='yellow', label='F7-0: Landfill gas decomposition')
ax3.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax3.set_xlim(0,200)
ax3.set_xlabel('Time (year)')
ax3.set_ylabel('C flows(t-C)')
ax3.set_title('Carbon flow, DL_FP_M_Tgr_60y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
plt.draw()
#%%
#plot for the individual carbon flows - test for symlog-scale graphs
#DL_FP_E_EC_Hbr_40y (Existing conversion efficiency)
# Energy scenario (Hevea brasiliensis, 40 y): adds the wood-pellets flow
# (F4-0) and omits the in-use-stocks outflow curve.
fig=plt.figure()
fig.show()
ax4_s=fig.add_subplot(111)
#plot
ax4_s.plot(t, flat_list_Hbr_40y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4_s.plot(t, OC_storage_E_Hbr40, color='darkturquoise', label='9: Landfill storage')
ax4_s.plot(t, decomp_tot_E_Hbr_40y, color='lightcoral', label='F1-0: Residue decomposition')
ax4_s.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4_s.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange', label='F8-0: Operational stage/processing emissions')
ax4_s.plot(t, Landfill_decomp_DL_FP_E_Hbr_40y, color='yellow', label='F7-0: Landfill gas decomposition')
ax4_s.plot(t, c_pellets_Hbr_40y, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax4_s.plot(t, TestDSME_Hbr40.o, label='in-use stock output')
ax4_s.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4_s.set_xlim(-1,200)
ax4_s.set_yscale('symlog')
ax4_s.set_xlabel('Time (year)')
ax4_s.set_ylabel('C flows (t-C) (symlog)')
ax4_s.set_title('Carbon flow, DL_FP_E_EC_Hbr_40y (EC) (symlog-scale)')
plt.show()
#%%
#plot for the individual carbon flows
#DL_FP_E_Hbr_40y (Existing conversion efficiency)
# Linear-scale variant.
fig=plt.figure()
fig.show()
ax4=fig.add_subplot(111)
#plot
ax4.plot(t, flat_list_Hbr_40y, color='darkkhaki', label='F0-1: Biomass C sequestration')
#ax4.plot(t, OC_storage_E_Hbr40, color='darkturquoise', label='9: Landfill storage')
ax4.plot(t, decomp_tot_E_Hbr_40y, color='lightcoral', label='F1-0: Residue decomposition')
ax4.plot(t, c_firewood_energy_E_Hbr40, color='mediumseagreen', label='F6-0-1: Emissions from firewood/other energy use')
ax4.plot(t, PH_Emissions_HWPE_Hbr_40y, color='orange', label='F8-0: Operational stage/processing emissions')
ax4.plot(t, Landfill_decomp_DL_FP_E_Hbr_40y, color='yellow', label='F7-0: Landfill gas decomposition')
ax4.plot(t, c_pellets_Hbr_40y, color='slategrey', label='F4-0: Emissions from wood pellets use')
#ax_g.plot(t, TestDSME_Hbr40.o, label='in-use stock output')
ax4.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax4.set_xlim(0,200)
ax4.set_xlabel('Time (year)')
ax4.set_ylabel('C flows(t-C)')
ax4.set_title('Carbon flow, DL_FP_E_Hbr_40y (EC)')
#plt.savefig('C:\Work\Data\ID Future Scenarios\Hectare-based\Fig\GWP_dyn_1_RIL_M')
#%%
#Step (23): Generate the excel file for the net carbon balance
# Per scenario: collect every C flow series (all length-201, one value per
# year), then sum them element-wise to obtain the net annual C balance.
# Sequestration series are negative, emissions positive, so the sum nets out.
Agg_Cflow_S1_Ac_7y = [c_firewood_energy_S1_Ac7, decomp_tot_S1_Ac_7y, TestDSM1_Ac7.o, PH_Emissions_HWP1_Ac_7y, Landfill_decomp_DL_FP_S1_Ac_7y, flat_list_Ac_7y]
Agg_Cflow_S1_Ac_18y = [c_firewood_energy_S1_Ac18, decomp_tot_S1_Ac_18y, TestDSM1_Ac18.o, PH_Emissions_HWP1_Ac_18y, Landfill_decomp_DL_FP_S1_Ac_18y, flat_list_Ac_18y]
Agg_Cflow_S1_Tgr_60y = [c_firewood_energy_S1_Tgr60, decomp_tot_S1_Tgr_60y, TestDSM1_Tgr60.o, PH_Emissions_HWP1_Tgr_60y, Landfill_decomp_DL_FP_S1_Tgr_60y, flat_list_Tgr_60y]
# The energy scenario additionally includes the wood-pellets flow.
Agg_Cflow_E_Hbr_40y = [c_firewood_energy_E_Hbr40, c_pellets_Hbr_40y, decomp_tot_E_Hbr_40y, TestDSME_Hbr40.o, PH_Emissions_HWPE_Hbr_40y, Landfill_decomp_DL_FP_E_Hbr_40y, flat_list_Hbr_40y]
# zip(*...) iterates year-by-year across all series; sum() nets each year.
Agg_Cflow_DL_FP_S1_Ac_7y = [sum(x) for x in zip(*Agg_Cflow_S1_Ac_7y)]
Agg_Cflow_DL_FP_S1_Ac_18y = [sum(x) for x in zip(*Agg_Cflow_S1_Ac_18y)]
Agg_Cflow_DL_FP_S1_Tgr_60y = [sum(x) for x in zip(*Agg_Cflow_S1_Tgr_60y)]
Agg_Cflow_DL_FP_E_Hbr_40y = [sum(x) for x in zip(*Agg_Cflow_E_Hbr_40y)]
# Build the simulation-year axis 0..200 (inclusive) used as the 'Year' column.
year = list(range(201))
print(year)
#Create colum results
# One column per scenario holding its net annual C balance (t-C per year).
dfM_DL_FP_EC = pd.DataFrame.from_dict({'Year':year,'M_EC_Ac_7y (t-C)':Agg_Cflow_DL_FP_S1_Ac_7y, 'M_EC_Ac_18y (t-C)':Agg_Cflow_DL_FP_S1_Ac_18y,
'M_EC_Tgr_60y (t-C)':Agg_Cflow_DL_FP_S1_Tgr_60y, 'E_EC_Hbr_40y (t-C)':Agg_Cflow_DL_FP_E_Hbr_40y})
#Export to excel
writer = pd.ExcelWriter('AggCFlow_DL_FP_EC_RB.xlsx', engine = 'xlsxwriter')
dfM_DL_FP_EC.to_excel(writer, sheet_name = 'DL_FP_EC', header=True, index=False)
# NOTE(review): save() is deprecated in newer pandas; close() alone suffices there.
writer.save()
writer.close()
#%%
#Step (24): Plot the net carbon balance
# One curve per scenario plus a dashed zero reference line.
fig=plt.figure()
fig.show()
ax5=fig.add_subplot(111)
# plot
ax5.plot(t, Agg_Cflow_DL_FP_S1_Ac_7y, color='orange', label='M_EC_Ac_7y')
ax5.plot(t, Agg_Cflow_DL_FP_S1_Ac_18y, color='darkturquoise', label='M_EC_Ac_18y')
ax5.plot(t, Agg_Cflow_DL_FP_S1_Tgr_60y, color='lightcoral', label='M_EC_Tgr_60y')
ax5.plot(t, Agg_Cflow_DL_FP_E_Hbr_40y, color='mediumseagreen', label='E_EC_Hbr_40y')
# zerolistmaker (defined earlier in the file) returns a list of zeros;
# tf-1 entries to match the length of t — TODO confirm t has tf-1 points here.
ax5.plot(t, zerolistmaker(tf-1), color='black', label='Zero line', ls='--', alpha=0.75)
ax5.legend(bbox_to_anchor=(1.04,1), loc="upper left", frameon=False)
ax5.set_xlim(-1,200)
ax5.set_ylim(-25,220)
#ax5.set_yscale('symlog')
ax5.set_xlabel('Time (year)')
ax5.set_ylabel('C flows (t-C)')
ax5.set_title('Net carbon balance, DL_FP_EC')
plt.show()
#%%
#Step (25): Generate the excel file for documentation of individual carbon flows in the system definition (Fig. 1)
#print year column
# Rebuild the 0..200 year axis for the system-definition tables below.
year = list(range(201))
print(year)
# Load the scenario input sheets for the system-definition bookkeeping.
# NOTE(review): these df2_* frames are not referenced in the code below, which
# uses df1_* frames loaded earlier — possibly leftover or used elsewhere; verify.
df2_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
df2_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
df2_Tgr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_40y')
df2_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
dfE2_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
Column1 = year
# Unit-conversion divisors. 44/12 is the CO2->C molar-mass ratio and 16/12 the
# CH4->C ratio; the 1000 factor presumably converts kg to tonnes, so dividing a
# kg-CO2 (resp. kg-CH4) value by these yields t-C — TODO confirm input units.
division = 1000*44/12
division_CH4 = 1000*16/12
## S1_Ac_7y
## define the input flow for the landfill (F5-7)
# Other_C_storage is read in CO2-equivalent units and converted to t-C;
# abs() flips the (negative) storage convention to a positive inflow.
OC_storage_S1_Ac7 = df1_Ac7['Other_C_storage'].values
OC_storage_S1_Ac7 = [x/division for x in OC_storage_S1_Ac7]
OC_storage_S1_Ac7 = [abs(number) for number in OC_storage_S1_Ac7]
# 1/0.82 scales stored C back up to total landfill input — presumably 82% of
# landfilled C remains stored long-term; TODO confirm the 0.82 source.
C_LF_S1_Ac7 = [x*1/0.82 for x in OC_storage_S1_Ac7]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Ac7 = [x/division for x in df1_Ac7['Input_PF'].values]
# 1/3 vs 2/3 split of the firewood-energy flow between processing-stage (F3-6)
# and harvest-stage (F2-6) firewood — TODO confirm the split rationale.
HWP_S1_Ac7_energy = [x*1/3 for x in c_firewood_energy_S1_Ac7]
HWP_S1_Ac7_landfill = [x*1/0.82 for x in OC_storage_S1_Ac7]
HWP_S1_Ac7_sum = [HWP_S1_Ac7, HWP_S1_Ac7_energy, HWP_S1_Ac7_landfill]
HWP_S1_Ac7_sum = [sum(x) for x in zip(*HWP_S1_Ac7_sum )]
#in-use stocks (S-4)
# NOTE: mutates TestDSM1_Ac7.s in place (unit conversion) — running this cell
# twice would divide the series twice.
TestDSM1_Ac7.s = [x/division for x in TestDSM1_Ac7.s]
#TestDSM1_Ac7.i = [x/division for x in TestDSM1_Ac7.i]
#calculate the F1-2
#In general, F1-2 = F2-3 + F2-6,
#To split the F1-2 to F1a-2 and F1c-2, we need to differentiate the flow for the initial land conversion (PF) and the subsequent land type (FP)
#create F1a-2
#tf = 201
#zero_PF_S2_Ac_7y = (tf,1)
#PF_S2_Ac_7y = np.zeros(zero_PF_S2_Ac_7y)
#PF_S2_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S2_Ac7_sum, [x*2/3 for x in c_firewood_energy_S2_Ac7])][0:8]
#create F1c-2
#zero_FP_S2_Ac_7y = (tf,1)
#FP_S2_Ac_7y = np.zeros(zero_FP_S2_Ac_7y)
#FP_S2_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S2_Ac7_sum, [x*2/3 for x in c_firewood_energy_S2_Ac7])][8:tf]
# calculate C stocks in landfill (S-7)
# Running balance: stock(t+1) = stock(t) + input(t+1) - decomposition(t+1).
tf = 201
zero_matrix_stocks_S1_Ac_7y = (tf,1)
stocks_S1_Ac_7y = np.zeros(zero_matrix_stocks_S1_Ac_7y)
i = 0
stocks_S1_Ac_7y[0] = C_LF_S1_Ac7[0] - Landfill_decomp_DL_FP_S1_Ac_7y[0]
while i < tf-1:
    stocks_S1_Ac_7y[i+1] = np.array(C_LF_S1_Ac7[i+1] - Landfill_decomp_DL_FP_S1_Ac_7y[i+1] + stocks_S1_Ac_7y[i])
    i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Ac_7y = [x1+x2 for (x1,x2) in zip(HWP_S1_Ac7_sum, [x*2/3 for x in c_firewood_energy_S1_Ac7])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
# Forest stock drawn down each year by sequestration (negative flat_list
# values add back), residue decomposition, and logged wood removals.
tf = 201
zero_matrix_ForCstocks_S1_Ac_7y = (tf,1)
ForCstocks_S1_Ac_7y = np.zeros(zero_matrix_ForCstocks_S1_Ac_7y)
i = 0
ForCstocks_S1_Ac_7y[0] = initAGB - flat_list_Ac_7y[0] - decomp_tot_S1_Ac_7y[0] - HWP_logged_S1_Ac_7y[0]
while i < tf-1:
    ForCstocks_S1_Ac_7y[i+1] = np.array(ForCstocks_S1_Ac_7y[i] - flat_list_Ac_7y[i+1] - decomp_tot_S1_Ac_7y[i+1] - HWP_logged_S1_Ac_7y[i+1])
    i = i + 1
##NonRW materials/energy amount (F9-0-1)
df1_amount_Ac7 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_7y')
NonRW_amount_S1_Ac_7y = df1_amount_Ac7['NonRW_amount'].values
NonRW_amount_S1_Ac_7y = [x/1000 for x in NonRW_amount_S1_Ac_7y]
##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Ac_7y = [x/division for x in emissions_NonRW_S1_Ac_7y]
#create columns
# Assemble every flow (F*) and stock (St-*) of the system definition into one
# table; [:,0] flattens the (tf,1) stock arrays into 1-D columns.
dfM_Ac_7y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Ac_7y,
'F1-0 (t-C)': decomp_tot_S1_Ac_7y,
#'F1a-2 (t-C)': PF_S1_Ac_7y,
#'F1c-2 (t-C)': FP_S1_Ac_7y,
'F1-2 (t-C)': HWP_logged_S1_Ac_7y,
'St-1 (t-C)':ForCstocks_S1_Ac_7y[:,0],
'F2-3 (t-C)': HWP_S1_Ac7_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Ac7],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Ac7_sum, [x*1/0.82 for x in OC_storage_S1_Ac7], [x*1/3 for x in c_firewood_energy_S1_Ac7])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Ac7],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Ac7],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM1_Ac7.s,
#'S-4-i (t-C)': TestDSM1_Ac7.i,
'F4-5 (t-C)': TestDSM1_Ac7.o,
'F5-6 (t-C)': TestDSM1_Ac7.o,
'F5-7 (t-C)': C_LF_S1_Ac7,
'F6-0-1 (t-C)': c_firewood_energy_S1_Ac7,
'F6-0-2 (t-C)': TestDSM1_Ac7.o,
'St-7 (t-C)': stocks_S1_Ac_7y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_S1_Ac_7y,
'F8-0 (t-C)': PH_Emissions_HWP1_Ac_7y,
'S9-0 (t)': NonRW_amount_S1_Ac_7y,
'F9-0 (t-C)': emissions_NonRW_S1_Ac_7y,
})
##S1_Ac_18y
# Same system-definition bookkeeping as S1_Ac_7y above, for the 18-year
# Acacia rotation (see that section's comments for the factor rationale).
## define the input flow for the landfill (F5-7)
OC_storage_S1_Ac18 = df1_Ac18['Other_C_storage'].values
OC_storage_S1_Ac18 = [x/division for x in OC_storage_S1_Ac18]
OC_storage_S1_Ac18 = [abs(number) for number in OC_storage_S1_Ac18]
C_LF_S1_Ac18 = [x*1/0.82 for x in OC_storage_S1_Ac18]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Ac18 = [x/division for x in df1_Ac18['Input_PF'].values]
HWP_S1_Ac18_energy = [x*1/3 for x in c_firewood_energy_S1_Ac18]
HWP_S1_Ac18_landfill = [x*1/0.82 for x in OC_storage_S1_Ac18]
HWP_S1_Ac18_sum = [HWP_S1_Ac18, HWP_S1_Ac18_energy, HWP_S1_Ac18_landfill]
HWP_S1_Ac18_sum = [sum(x) for x in zip(*HWP_S1_Ac18_sum )]
## in-use stocks (S-4)
# NOTE: in-place unit conversion; not idempotent if the cell is re-run.
TestDSM1_Ac18.s = [x/division for x in TestDSM1_Ac18.s]
#TestDSM1_Ac18.i = [x/division for x in TestDSM1_Ac18.i]
#calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S1_Ac_18y = (tf,1)
stocks_S1_Ac_18y = np.zeros(zero_matrix_stocks_S1_Ac_18y)
i = 0
stocks_S1_Ac_18y[0] = C_LF_S1_Ac18[0] - Landfill_decomp_DL_FP_S1_Ac_18y[0]
while i < tf-1:
    stocks_S1_Ac_18y[i+1] = np.array(C_LF_S1_Ac18[i+1] - Landfill_decomp_DL_FP_S1_Ac_18y[i+1] + stocks_S1_Ac_18y[i])
    i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Ac_18y = [x1+x2 for (x1,x2) in zip(HWP_S1_Ac18_sum, [x*2/3 for x in c_firewood_energy_S1_Ac18])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1_Ac_18y = (tf,1)
ForCstocks_S1_Ac_18y = np.zeros(zero_matrix_ForCstocks_S1_Ac_18y)
i = 0
ForCstocks_S1_Ac_18y[0] = initAGB - flat_list_Ac_18y[0] - decomp_tot_S1_Ac_18y[0] - HWP_logged_S1_Ac_18y[0]
while i < tf-1:
    ForCstocks_S1_Ac_18y[i+1] = np.array(ForCstocks_S1_Ac_18y[i] - flat_list_Ac_18y[i+1] - decomp_tot_S1_Ac_18y[i+1] - HWP_logged_S1_Ac_18y[i+1])
    i = i + 1
##NonRW materials/energy amount (F9-0-1)
df1_amount_Ac18 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_S1_Ac_18y')
NonRW_amount_S1_Ac_18y = df1_amount_Ac18['NonRW_amount'].values
NonRW_amount_S1_Ac_18y = [x/1000 for x in NonRW_amount_S1_Ac_18y]
##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Ac_18y = [x/division for x in emissions_NonRW_S1_Ac_18y]
#create columns
dfM_Ac_18y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Ac_18y,
'F1-0 (t-C)': decomp_tot_S1_Ac_18y,
#'F1a-2 (t-C)': PF_S1_Ac_18y,
#'F1c-2 (t-C)': FP_S1_Ac_18y,
'F1-2 (t-C)': HWP_logged_S1_Ac_18y,
'St-1 (t-C)':ForCstocks_S1_Ac_18y[:,0],
'F2-3 (t-C)': HWP_S1_Ac18_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Ac18],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Ac18_sum, [x*1/0.82 for x in OC_storage_S1_Ac18], [x*1/3 for x in c_firewood_energy_S1_Ac18])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Ac18],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Ac18],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM1_Ac18.s,
#'S-4-i (t-C)': TestDSM1_Ac7.i,
'F4-5 (t-C)': TestDSM1_Ac18.o,
'F5-6 (t-C)': TestDSM1_Ac18.o,
'F5-7 (t-C)': C_LF_S1_Ac18,
'F6-0-1 (t-C)': c_firewood_energy_S1_Ac18,
'F6-0-2 (t-C)': TestDSM1_Ac18.o,
'St-7 (t-C)': stocks_S1_Ac_18y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_S1_Ac_18y,
'F8-0 (t-C)': PH_Emissions_HWP1_Ac_18y,
'S9-0 (t)': NonRW_amount_S1_Ac_18y,
'F9-0 (t-C)': emissions_NonRW_S1_Ac_18y,
})
##S1_Tgr_60y
# Same system-definition bookkeeping, for the 60-year Tectona grandis
# rotation (see the S1_Ac_7y section for the factor rationale).
## define the input flow for the landfill (F5-7)
OC_storage_S1_Tgr60 = df1_Tgr60['Other_C_storage'].values
OC_storage_S1_Tgr60 = [x/division for x in OC_storage_S1_Tgr60]
OC_storage_S1_Tgr60 = [abs(number) for number in OC_storage_S1_Tgr60]
C_LF_S1_Tgr60 = [x*1/0.82 for x in OC_storage_S1_Tgr60]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_S1_Tgr60 = [x/division for x in df1_Tgr60['Input_PF'].values]
HWP_S1_Tgr60_energy = [x*1/3 for x in c_firewood_energy_S1_Tgr60]
HWP_S1_Tgr60_landfill = [x*1/0.82 for x in OC_storage_S1_Tgr60]
HWP_S1_Tgr60_sum = [HWP_S1_Tgr60, HWP_S1_Tgr60_energy, HWP_S1_Tgr60_landfill]
HWP_S1_Tgr60_sum = [sum(x) for x in zip(*HWP_S1_Tgr60_sum )]
## in-use stocks (S-4)
# NOTE: in-place unit conversion; not idempotent if the cell is re-run.
TestDSM1_Tgr60.s = [x/division for x in TestDSM1_Tgr60.s]
#TestDSM1_Tgr60.i = [x/division for x in TestDSM1_Tgr60.i]
## calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_S1_Tgr_60y = (tf,1)
stocks_S1_Tgr_60y = np.zeros(zero_matrix_stocks_S1_Tgr_60y)
i = 0
stocks_S1_Tgr_60y[0] = C_LF_S1_Tgr60[0] - Landfill_decomp_DL_FP_S1_Tgr_60y[0]
while i < tf-1:
    stocks_S1_Tgr_60y[i+1] = np.array(C_LF_S1_Tgr60[i+1] - Landfill_decomp_DL_FP_S1_Tgr_60y[i+1] + stocks_S1_Tgr_60y[i])
    i = i + 1
#print(stocks_S2_Ac_7y[:])
#print(type(stocks_S2_Ac_7y))
#print(type(C_LF_S2_Ac7))
#print(type(Landfill_decomp_PF_FP_S2_Ac_7y))
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_S1_Tgr_60y = [x1+x2 for (x1,x2) in zip(HWP_S1_Tgr60_sum, [x*2/3 for x in c_firewood_energy_S1_Tgr60])]
## calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_S1_Tgr_60y = (tf,1)
ForCstocks_S1_Tgr_60y = np.zeros(zero_matrix_ForCstocks_S1_Tgr_60y)
i = 0
ForCstocks_S1_Tgr_60y[0] = initAGB - flat_list_Tgr_60y[0] - decomp_tot_S1_Tgr_60y[0] - HWP_logged_S1_Tgr_60y[0]
while i < tf-1:
    ForCstocks_S1_Tgr_60y[i+1] = np.array(ForCstocks_S1_Tgr_60y[i] - flat_list_Tgr_60y[i+1] - decomp_tot_S1_Tgr_60y[i+1] - HWP_logged_S1_Tgr_60y[i+1])
    i = i + 1
##NonRW materials/energy amount (F9-0-1)
df1_amount_Tgr60 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_S1_Tgr_60y')
NonRW_amount_S1_Tgr_60y = df1_amount_Tgr60['NonRW_amount'].values
NonRW_amount_S1_Tgr_60y = [x/1000 for x in NonRW_amount_S1_Tgr_60y]
##NonRW emissions (F9-0-2)
emissions_NonRW_S1_Tgr_60y = [x/division for x in emissions_NonRW_S1_Tgr_60y]
#create columns
dfM_Tgr_60y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Tgr_60y,
'F1-0 (t-C)': decomp_tot_S1_Tgr_60y,
#'F1a-2 (t-C)': PF_S1_Tgr_60y,
#'F1c-2 (t-C)': FP_S1_Tgr_60y,
'F1-2 (t-C)': HWP_logged_S1_Tgr_60y,
'St-1 (t-C)':ForCstocks_S1_Tgr_60y[:,0],
'F2-3 (t-C)': HWP_S1_Tgr60_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_S1_Tgr60],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_S1_Tgr60_sum, [x*1/0.82 for x in OC_storage_S1_Tgr60], [x*1/3 for x in c_firewood_energy_S1_Tgr60])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_S1_Tgr60],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_S1_Tgr60],
# 'F4-0 (t-C)':,
'St-4 (t-C)': TestDSM1_Tgr60.s,
#'S-4-i (t-C)': TestDSM1_Tgr60.i,
'F4-5 (t-C)': TestDSM1_Tgr60.o,
'F5-6 (t-C)': TestDSM1_Tgr60.o,
'F5-7 (t-C)': C_LF_S1_Tgr60,
'F6-0-1 (t-C)': c_firewood_energy_S1_Tgr60,
'F6-0-2 (t-C)': TestDSM1_Tgr60.o,
'St-7 (t-C)': stocks_S1_Tgr_60y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_S1_Tgr_60y,
'F8-0 (t-C)': PH_Emissions_HWP1_Tgr_60y,
'S9-0 (t)': NonRW_amount_S1_Tgr_60y,
'F9-0 (t-C)': emissions_NonRW_S1_Tgr_60y,
})
##S1_E_Hbr_40y
# Energy scenario bookkeeping (Hevea brasiliensis, 40 y). Differs from the
# material scenarios: the F2-3 input is the 'Wood_pellets' column and the
# F4-0 (pellet combustion) flow is included in the output table.
## define the input flow for the landfill (F5-7)
OC_storage_E_Hbr40 = dfE_Hbr40['Other_C_storage'].values
OC_storage_E_Hbr40 = [x/division for x in OC_storage_E_Hbr40]
OC_storage_E_Hbr40 = [abs(number) for number in OC_storage_E_Hbr40]
C_LF_E_Hbr40 = [x*1/0.82 for x in OC_storage_E_Hbr40]
## define the input flow from the logging/harvesting to wood materials/pellets processing (F2-3)
HWP_E_Hbr40 = [x/division for x in dfE_Hbr40['Wood_pellets'].values]
HWP_E_Hbr40_energy = [x*1/3 for x in c_firewood_energy_E_Hbr40]
HWP_E_Hbr40_landfill = [x*1/0.82 for x in OC_storage_E_Hbr40]
HWP_E_Hbr40_sum = [HWP_E_Hbr40, HWP_E_Hbr40_energy, HWP_E_Hbr40_landfill]
HWP_E_Hbr40_sum = [sum(x) for x in zip(*HWP_E_Hbr40_sum )]
## in-use stocks (S-4)
# NOTE: in-place unit conversion; not idempotent if the cell is re-run.
TestDSME_Hbr40.s = [x/division for x in TestDSME_Hbr40.s]
## calculate C stocks in landfill (S-7)
tf = 201
zero_matrix_stocks_E_Hbr_40y = (tf,1)
stocks_E_Hbr_40y = np.zeros(zero_matrix_stocks_E_Hbr_40y)
i = 0
stocks_E_Hbr_40y[0] = C_LF_E_Hbr40[0] - Landfill_decomp_DL_FP_E_Hbr_40y[0]
while i < tf-1:
    stocks_E_Hbr_40y[i+1] = np.array(C_LF_E_Hbr40[i+1] - Landfill_decomp_DL_FP_E_Hbr_40y[i+1] + stocks_E_Hbr_40y[i])
    i = i + 1
## calculate aggregate flow of logged wood (F1-2)
HWP_logged_E_Hbr_40y = [x1+x2 for (x1,x2) in zip(HWP_E_Hbr40_sum, [x*2/3 for x in c_firewood_energy_E_Hbr40])]
#calculate the stocks in the forest (AGB + undecomposed residue) (S-1a+S-1c)
tf = 201
zero_matrix_ForCstocks_E_Hbr_40y = (tf,1)
ForCstocks_E_Hbr_40y = np.zeros(zero_matrix_ForCstocks_E_Hbr_40y)
i = 0
ForCstocks_E_Hbr_40y[0] = initAGB - flat_list_Hbr_40y[0] - decomp_tot_E_Hbr_40y[0] - HWP_logged_E_Hbr_40y[0]
while i < tf-1:
    ForCstocks_E_Hbr_40y[i+1] = np.array(ForCstocks_E_Hbr_40y[i] - flat_list_Hbr_40y[i+1] - decomp_tot_E_Hbr_40y[i+1] - HWP_logged_E_Hbr_40y[i+1])
    i = i + 1
##NonRW materials/energy amount (F9-0-1)
dfE_amount_Hbr40 = pd.read_excel('C:\\Work\\Programming\\Practice\\NonRW_DL_FP.xlsx', 'DL_FP_E_Hbr_40y')
NonRW_amount_E_Hbr_40y = dfE_amount_Hbr40['NonRW_amount'].values
NonRW_amount_E_Hbr_40y = [x/1000 for x in NonRW_amount_E_Hbr_40y]
##NonRW emissions (F9-0-2)
emissions_NonRW_E_Hbr_40y = [x/division for x in emissions_NonRW_E_Hbr_40y]
#create columns
# NOTE(review): this rebinds dfE_Hbr_40y, overwriting the per-flow table
# built in Step (21) — confirm that table is no longer needed at this point.
dfE_Hbr_40y = pd.DataFrame.from_dict({'Year':Column1,
'F0-1 (t-C)': flat_list_Hbr_40y,
'F1-0 (t-C)': decomp_tot_E_Hbr_40y,
#'F1a-2 (t-C)': PF_S2_Tgr_60y,
#'F1c-2 (t-C)': FP_S2_Tgr_60y,
'F1-2 (t-C)': HWP_logged_E_Hbr_40y,
'St-1 (t-C)':ForCstocks_E_Hbr_40y[:,0],
'F2-3 (t-C)': HWP_E_Hbr40_sum,
'F2-6 (t-C)': [x*2/3 for x in c_firewood_energy_E_Hbr40],
'SM/E (t-C)': [x1-x2-x3 for (x1,x2,x3) in zip(HWP_E_Hbr40_sum, [x*1/0.82 for x in OC_storage_E_Hbr40], [x*1/3 for x in c_firewood_energy_E_Hbr40])],
'F3-5 (t-C)':[x*1/0.82 for x in OC_storage_E_Hbr40],
'F3-6 (t-C)': [x*1/3 for x in c_firewood_energy_E_Hbr40],
'F4-0 (t-C)': c_pellets_Hbr_40y,
'St-4 (t-C)': TestDSME_Hbr40.s,
#'S-4-i (t-C)': TestDSME_Hbr40.i,
'F4-5 (t-C)': TestDSME_Hbr40.o,
'F5-6 (t-C)': TestDSME_Hbr40.o,
'F5-7 (t-C)': C_LF_E_Hbr40,
'F6-0-1 (t-C)': c_firewood_energy_E_Hbr40,
'F6-0-2 (t-C)': TestDSME_Hbr40.o,
'St-7 (t-C)': stocks_E_Hbr_40y[:,0],
'F7-0 (t-C)': Landfill_decomp_DL_FP_E_Hbr_40y,
'F8-0 (t-C)': PH_Emissions_HWPE_Hbr_40y,
'S9-0 (t)': NonRW_amount_E_Hbr_40y,
'F9-0 (t-C)': emissions_NonRW_E_Hbr_40y,
})
# Export the four system-definition tables (one sheet per scenario).
writer = pd.ExcelWriter('C_flows_SysDef_DL_FP_EC_RB.xlsx', engine = 'xlsxwriter')
dfM_Ac_7y.to_excel(writer, sheet_name = 'DL_FP_M_EC_Ac_7y', header=True, index=False)
dfM_Ac_18y.to_excel(writer, sheet_name = 'DL_FP_M_EC_Ac_18y', header=True, index=False)
dfM_Tgr_60y.to_excel(writer, sheet_name = 'DL_FP_M_EC_Tgr_60y', header=True, index=False)
dfE_Hbr_40y.to_excel(writer, sheet_name = 'DL_FP_E_EC_Hbr_40y', header=True, index=False)
# NOTE(review): save() is deprecated in newer pandas; close() alone suffices there.
writer.save()
writer.close()
#%% | [
"noreply@github.com"
] | noreply@github.com |
37d5fa2a6f6b3325e6960b512dbb88914fa86b99 | b2ba670818623f8ab18162382f7394baed97b7cb | /test-data/AndroidSlicer/Mitzuli/DD/10.py | d2f993240a11bbf9562d22402b0c95323cee8d8d | [
"MIT"
] | permissive | hsumyatwin/ESDroid-artifact | 012c26c40537a79b255da033e7b36d78086b743a | bff082c4daeeed62ceda3d715c07643203a0b44b | refs/heads/main | 2023-04-11T19:17:33.711133 | 2022-09-30T13:40:23 | 2022-09-30T13:40:23 | 303,378,286 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,184 | py | #start monkey test seedNo 0
# MonkeyRunner (Jython) UI-replay script: connects to an attached Android
# device, launches the Mitzuli app, and replays a fixed sequence of taps.
# NOTE(review): several imports below (os, Popen, PIPE, random, subprocess,
# randint, takeSnapshot, ...) appear unused in this script.
import os;
from subprocess import Popen
from subprocess import PIPE
from com.android.monkeyrunner import MonkeyRunner, MonkeyDevice, MonkeyImage
from com.android.monkeyrunner.MonkeyDevice import takeSnapshot
from com.android.monkeyrunner.easy import EasyMonkeyDevice
from com.android.monkeyrunner.easy import By
from com.android.chimpchat.hierarchyviewer import HierarchyViewer
from com.android.monkeyrunner import MonkeyView
import random
import sys
import subprocess
from sys import exit
from random import randint
# Block until a device/emulator is attached via adb.
device = MonkeyRunner.waitForConnection()
package = 'com.mitzuli'
activity ='com.mitzuli.MainActivity'
# Component string in 'package/activity' form expected by startActivity.
runComponent = package+'/'+activity
device.startActivity(component=runComponent)
MonkeyRunner.sleep(0.5)
MonkeyRunner.sleep(0.5)
# Replay hard-coded screen coordinates (device/resolution specific) with a
# 0.5 s settle delay between taps.
device.touch(1300,113, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(1020,121, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(1001,127, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(863,125, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(355,1601, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(247,1839, 'DOWN_AND_UP')
MonkeyRunner.sleep(0.5)
device.touch(80,154, 'DOWN_AND_UP')
"hsumyatwin@gmail.com"
] | hsumyatwin@gmail.com |
d80f1035fe26c828d8c7307112f4fa58542caa74 | 2192f4e3843c796c5208409337199946f7036079 | /Django_projects/User/Users/usersapp/migrations/0002_auto_20210520_1143.py | 361f9e55d0e344121ee345aa6fee1f04bb73179c | [] | no_license | GhaithAssaf9/Python | 370f84e1ca7d8076f066f6bd0fdae7007aac3dd3 | 84c84ee4463112b0a8ab08787f139fea3cbf5cf5 | refs/heads/main | 2023-06-25T10:16:14.022594 | 2021-08-03T08:41:31 | 2021-08-03T08:41:31 | 364,543,192 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | # Generated by Django 2.2.4 on 2021-05-20 08:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('usersapp', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='users',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('first_name', models.CharField(max_length=45)),
('last_name', models.CharField(max_length=45)),
('age', models.DateField()),
('email', models.CharField(max_length=45)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
],
),
migrations.DeleteModel(
name='Moviecopy',
),
]
| [
"urnostalgic@gmail.com"
] | urnostalgic@gmail.com |
9a33605fc91c2d1ce22db31b4e9669455ca00773 | 8235076c125e5f69188917da520669b89dfdd350 | /user/migrations/0006_anfiteatro_arlivre_atividadehasmaterial_authgroup_authgrouppermissions_authpermission_authuser_authu.py | b15d71b7e5c59cc370d9b70ca5c39c80eab89621 | [] | no_license | guilhascorreia24/componente-Utilizador | 37b319daeb9fd7174db24d2616f6ed833963aafd | 3aae759e7a0961b95d8502e8163efef91e0471d4 | refs/heads/master | 2021-08-10T08:47:39.092791 | 2020-07-03T12:11:36 | 2020-07-03T12:11:36 | 247,350,427 | 0 | 1 | null | 2021-03-31T19:59:17 | 2020-03-14T20:43:53 | CSS | UTF-8 | Python | false | false | 15,937 | py | # Generated by Django 3.0.4 on 2020-04-27 22:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('user', '0005_atividade_dia_escola_espaco_inscricao_inscricaocoletiva_inscricaoindividual_tarefa'),
]
operations = [
migrations.CreateModel(
name='Anfiteatro',
fields=[
('edificio', models.CharField(max_length=45)),
('andar', models.CharField(max_length=45)),
('espaco_idespaco', models.OneToOneField(db_column='espaco_idespaco', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Espaco')),
],
options={
'db_table': 'anfiteatro',
'managed': False,
},
),
migrations.CreateModel(
name='Arlivre',
fields=[
('descricao', models.CharField(max_length=255)),
('espaco_idespaco', models.OneToOneField(db_column='espaco_idespaco', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Espaco')),
],
options={
'db_table': 'arlivre',
'managed': False,
},
),
migrations.CreateModel(
name='AtividadeHasMaterial',
fields=[
('atividade_idatividade', models.OneToOneField(db_column='Atividade_idAtividade', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Atividade')),
],
options={
'db_table': 'atividade_has_material',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroup',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=150, unique=True)),
],
options={
'db_table': 'auth_group',
'managed': False,
},
),
migrations.CreateModel(
name='AuthGroupPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_group_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='AuthPermission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('codename', models.CharField(max_length=100)),
],
options={
'db_table': 'auth_permission',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUser',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128)),
('last_login', models.DateTimeField(blank=True, null=True)),
('is_superuser', models.IntegerField()),
('username', models.CharField(max_length=150, unique=True)),
('first_name', models.CharField(max_length=30)),
('last_name', models.CharField(max_length=150)),
('email', models.CharField(max_length=254)),
('is_staff', models.IntegerField()),
('is_active', models.IntegerField()),
('date_joined', models.DateTimeField()),
],
options={
'db_table': 'auth_user',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserGroups',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_groups',
'managed': False,
},
),
migrations.CreateModel(
name='AuthUserUserPermissions',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
options={
'db_table': 'auth_user_user_permissions',
'managed': False,
},
),
migrations.CreateModel(
name='ColaboradorHasHorario',
fields=[
('colaborador_has_horario_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'colaborador_has_horario',
'managed': False,
},
),
migrations.CreateModel(
name='ColaboradorHasUnidadeOrganica',
fields=[
('colaborador_has_unidade_organica_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'colaborador_has_unidade_organica',
'managed': False,
},
),
migrations.CreateModel(
name='CoordenadorHasDepartamento',
fields=[
('coordenador_utilizador_idutilizador', models.OneToOneField(db_column='Coordenador_Utilizador_idutilizador', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Coordenador')),
],
options={
'db_table': 'coordenador_has_departamento',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoAdminLog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action_time', models.DateTimeField()),
('object_id', models.TextField(blank=True, null=True)),
('object_repr', models.CharField(max_length=200)),
('action_flag', models.PositiveSmallIntegerField()),
('change_message', models.TextField()),
],
options={
'db_table': 'django_admin_log',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoContentType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100)),
],
options={
'db_table': 'django_content_type',
'managed': False,
},
),
migrations.CreateModel(
name='DjangoMigrations',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('app', models.CharField(max_length=255)),
('name', models.CharField(max_length=255)),
('applied', models.DateTimeField()),
],
options={
'db_table': 'django_migrations',
'managed': False,
},
),
migrations.CreateModel(
name='Horario',
fields=[
('hora', models.TimeField(primary_key=True, serialize=False)),
],
options={
'db_table': 'horario',
'managed': False,
},
),
migrations.CreateModel(
name='HorarioHasDia',
fields=[
('id_dia_hora', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'horario_has_dia',
'managed': False,
},
),
migrations.CreateModel(
name='Idioma',
fields=[
('nome', models.CharField(max_length=255, primary_key=True, serialize=False)),
('sigla', models.CharField(max_length=45, unique=True)),
],
options={
'db_table': 'idioma',
'managed': False,
},
),
migrations.CreateModel(
name='InscricaoHasPrato',
fields=[
('inscricao_has_prato_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'inscricao_has_prato',
'managed': False,
},
),
migrations.CreateModel(
name='InscricaoHasSessao',
fields=[
('inscricao_has_sessao_id', models.AutoField(primary_key=True, serialize=False)),
('nr_inscritos', models.IntegerField()),
],
options={
'db_table': 'inscricao_has_sessao',
'managed': False,
},
),
migrations.CreateModel(
name='Material',
fields=[
('idmaterial', models.AutoField(db_column='idMaterial', primary_key=True, serialize=False)),
('descricao', models.CharField(max_length=255)),
],
options={
'db_table': 'material',
'managed': False,
},
),
migrations.CreateModel(
name='Menu',
fields=[
('idmenu', models.AutoField(db_column='idMenu', primary_key=True, serialize=False)),
('tipo', models.CharField(max_length=45)),
('menu', models.CharField(max_length=45)),
('nralmocosdisponiveis', models.IntegerField()),
],
options={
'db_table': 'menu',
'managed': False,
},
),
migrations.CreateModel(
name='Notificacao',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('descricao', models.CharField(max_length=255)),
('criadoem', models.DateTimeField()),
('idutilizadorenvia', models.IntegerField()),
('utilizadorrecebe', models.IntegerField()),
],
options={
'db_table': 'notificacao',
'managed': False,
},
),
migrations.CreateModel(
name='Paragem',
fields=[
('paragem', models.CharField(max_length=45, primary_key=True, serialize=False)),
],
options={
'db_table': 'paragem',
'managed': False,
},
),
migrations.CreateModel(
name='Prato',
fields=[
('idprato', models.AutoField(db_column='idPrato', primary_key=True, serialize=False)),
('nralmocos', models.IntegerField()),
('descricao', models.CharField(max_length=125)),
],
options={
'db_table': 'prato',
'managed': False,
},
),
migrations.CreateModel(
name='Responsaveis',
fields=[
('idresponsavel', models.AutoField(primary_key=True, serialize=False)),
('nome', models.CharField(max_length=255)),
('email', models.CharField(max_length=255)),
('telefone', models.CharField(max_length=45)),
],
options={
'db_table': 'responsaveis',
'managed': False,
},
),
migrations.CreateModel(
name='Sala',
fields=[
('edificio', models.CharField(max_length=45)),
('andar', models.CharField(max_length=45)),
('gabinete', models.CharField(blank=True, max_length=45, null=True)),
('espaco_idespaco', models.OneToOneField(db_column='espaco_idespaco', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Espaco')),
],
options={
'db_table': 'sala',
'managed': False,
},
),
migrations.CreateModel(
name='Sessao',
fields=[
('idsessao', models.AutoField(primary_key=True, serialize=False)),
('nrinscritos', models.IntegerField()),
('vagas', models.IntegerField()),
],
options={
'db_table': 'sessao',
'managed': False,
},
),
migrations.CreateModel(
name='SessaoHasHorarioHasDia',
fields=[
('sessao_has_horario_has_dia_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'sessao_has_horario_has_dia',
'managed': False,
},
),
migrations.CreateModel(
name='Transporte',
fields=[
('idtransporte', models.AutoField(primary_key=True, serialize=False)),
('capacidade', models.IntegerField()),
('identificacao', models.CharField(max_length=255)),
],
options={
'db_table': 'transporte',
'managed': False,
},
),
migrations.CreateModel(
name='TransporteHasHorario',
fields=[
('id_transporte_has_horario', models.IntegerField(primary_key=True, serialize=False)),
],
options={
'db_table': 'transporte_has_horario',
'managed': False,
},
),
migrations.CreateModel(
name='TransporteHasInscricao',
fields=[
('transporte_has_inscricao_id', models.AutoField(primary_key=True, serialize=False)),
('numero_passageiros', models.IntegerField(blank=True, null=True)),
],
options={
'db_table': 'transporte_has_inscricao',
'managed': False,
},
),
migrations.CreateModel(
name='UtilizadorHasNotificacao',
fields=[
('utilizador_has_notificacao_id', models.AutoField(primary_key=True, serialize=False)),
],
options={
'db_table': 'utilizador_has_notificacao',
'managed': False,
},
),
migrations.CreateModel(
name='TransportePessoal',
fields=[
('transporte_idtransporte', models.OneToOneField(db_column='transporte_idtransporte', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Transporte')),
],
options={
'db_table': 'transporte_pessoal',
'managed': False,
},
),
migrations.CreateModel(
name='TransporteUniversitario',
fields=[
('capacidade', models.IntegerField()),
('transporte_idtransporte', models.OneToOneField(db_column='transporte_idtransporte', on_delete=django.db.models.deletion.DO_NOTHING, primary_key=True, serialize=False, to='user.Transporte')),
],
options={
'db_table': 'transporte_universitario',
'managed': False,
},
),
]
| [
"brunosusana99@hotmail.com"
] | brunosusana99@hotmail.com |
bb363c5ddd3739e93a04900c1353f55c9f17c3ab | 923f9270a12be35fdd297d8f27e522c601e94eab | /src/decay/test/test_dc_nose.py | 00a9741044a433b8333c1da2f59dfc64f2536274 | [] | no_license | t-bltg/INF5620 | a06b6e06b6aba3bc35e933abd19c58cd78584c1f | d3e000462302839b49693cfe06a2f2df924c5027 | refs/heads/master | 2021-05-31T00:41:41.624838 | 2016-03-22T09:29:00 | 2016-03-22T09:29:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,328 | py | import nose.tools as nt
import sys, os
sys.path.insert(0, os.pardir)
import dc_mod_unittest as dc_mod
import numpy as np
def exact_discrete_solution(n, I, a, theta, dt):
"""Return exact discrete solution of the theta scheme."""
dt = float(dt) # avoid integer division
factor = (1 - (1-theta)*a*dt)/(1 + theta*dt*a)
return I*factor**n
def test_against_discrete_solution():
"""
Compare result from solver against
formula for the discrete solution.
"""
theta = 0.8; a = 2; I = 0.1; dt = 0.8
N = int(8/dt) # no of steps
u, t = dc_mod.solver(I=I, a=a, T=N*dt, dt=dt, theta=theta)
u_de = np.array([exact_discrete_solution(n, I, a, theta, dt)
for n in range(N+1)])
diff = np.abs(u_de - u).max()
nt.assert_almost_equal(diff, 0, delta=1E-14)
def test_solver():
"""
Compare result from solver against
precomputed arrays for theta=0, 0.5, 1.
"""
I=0.8; a=1.2; T=4; dt=0.5 # fixed parameters
precomputed = {
't': np.array([ 0. , 0.5, 1. , 1.5, 2. , 2.5,
3. , 3.5, 4. ]),
0.5: np.array(
[ 0.8 , 0.43076923, 0.23195266, 0.12489759,
0.06725255, 0.03621291, 0.01949926, 0.0104996 ,
0.00565363]),
0: np.array(
[ 8.00000000e-01, 3.20000000e-01,
1.28000000e-01, 5.12000000e-02,
2.04800000e-02, 8.19200000e-03,
3.27680000e-03, 1.31072000e-03,
5.24288000e-04]),
1: np.array(
[ 0.8 , 0.5 , 0.3125 , 0.1953125 ,
0.12207031, 0.07629395, 0.04768372, 0.02980232,
0.01862645]),
}
for theta in 0, 0.5, 1:
u, t = dc_mod.solver(I, a, T, dt, theta=theta)
diff = np.abs(u - precomputed[theta]).max()
# Precomputed numbers are known to 8 decimal places
nt.assert_almost_equal(diff, 0, places=8,
msg='theta=%s' % theta)
def test_potential_integer_division():
"""Choose variables that can trigger integer division."""
theta = 1; a = 1; I = 1; dt = 2
N = 4
u, t = dc_mod.solver(I=I, a=a, T=N*dt, dt=dt, theta=theta)
u_de = np.array([exact_discrete_solution(n, I, a, theta, dt)
for n in range(N+1)])
diff = np.abs(u_de - u).max()
nt.assert_almost_equal(diff, 0, delta=1E-14)
def test_convergence_rates():
"""Compare empirical convergence rates to exact ones."""
# Set command-line arguments directly in sys.argv
sys.argv[1:] = '--I 0.8 --a 2.1 --T 5 '\
'--dt 0.4 0.2 0.1 0.05 0.025'.split()
# Suppress output from dc_mod.main()
stdout = sys.stdout # save standard output for later use
scratchfile = open('.tmp', 'w') # fake standard output
sys.stdout = scratchfile
r = dc_mod.main()
for theta in r:
nt.assert_true(r[theta]) # check for non-empty list
scratchfile.close()
sys.stdout = stdout # restore standard output
expected_rates = {0: 1, 1: 1, 0.5: 2}
for theta in r:
r_final = r[theta][-1]
# Compare to 1 decimal place
nt.assert_almost_equal(expected_rates[theta], r_final,
places=1, msg='theta=%s' % theta)
# no need for any main
| [
"hpl@simula.no"
] | hpl@simula.no |
af4bf8c4de3811ea5faf7ca96cca9df2fd75cb2c | c1ab0f3ccd1ae6f59a80bfcc3c2caaeed2868ba8 | /django_medcheck/django_medcheck/settings.py | ac4fdda695b1c79fd2e1d3ba6cbd6894cbb0a6ac | [] | no_license | antz22/MedCheck | 0a91945f63c850b429f829b1e12e242f885ffdb9 | b75d44b13492f3dcac641504544eea3ef2e3754f | refs/heads/master | 2023-05-31T00:00:01.905437 | 2021-06-14T12:21:12 | 2021-06-14T12:21:12 | 376,289,998 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,565 | py | """
Django settings for django_medcheck project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'django-insecure-0eqpfmdz_f2!i^d6$jpi$wroo3oj)py2(wc+=ak+mqr^+xo7i-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'corsheaders',
'djoser',
'core',
]
CORS_ALLOWED_ORIGINS = [
"http://localhost:8080"
]
CORS_ORIGIN_WHITELIST = [
"http://localhost:8080"
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_medcheck.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_medcheck.wsgi.application'
AUTH_USER_MODEL = 'core.User'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
| [
"anthonyznj@gmail.com"
] | anthonyznj@gmail.com |
c33fb73c3f175c6bb75340690b33f4e382136492 | 97d51839b27ce11bd1302d593ffba330da3234d9 | /WeatherForecastApp/mysite/webapp/models.py | 9245aa65546550c6b4ed64172d5dc894a5a0e8ef | [] | no_license | adityagurram/CloudComputing | dd4015c71892062b470be9104eb9189b4c38ebf1 | 667e72cb53ddd721ecb69ca71d3804eb1a7ee94d | refs/heads/master | 2020-03-07T03:09:29.570179 | 2018-03-29T03:43:19 | 2018-03-29T03:43:19 | 127,227,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 227 | py | from django.db import models
class Climate(models.Model):
DATE= models.IntegerField(unique=True)
TMAX= models.FloatField(null=True, blank=True, default=None,)
TMIN=models.FloatField(null=True, blank=True, default=None) | [
"noreply@github.com"
] | noreply@github.com |
361bcd8554afe3ab13ba6067f3468a34e6a3fba4 | 15c86f80f0009118f8e1bd01d866cfdeeb00fbb4 | /assignment2/sgd.py | f2a753e685a8dad8799f774a1ba6127d4a616556 | [] | no_license | Baekyeongmin/2019_cs224n | 1680c67e399df69be3513b66f97d88b98a55831e | bed832a65dc3df0bb8b2f3cff41fe58ebdb12901 | refs/heads/master | 2020-05-05T03:30:44.290736 | 2019-06-09T05:24:13 | 2019-06-09T05:24:13 | 179,675,422 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | #!/usr/bin/env python
# Save parameters every a few SGD iterations as fail-safe
SAVE_PARAMS_EVERY = 5000
import pickle
import glob
import random
import numpy as np
import os.path as op
def load_saved_params():
"""
A helper function that loads previously saved parameters and resets
iteration start.
"""
st = 0
for f in glob.glob("saved_params_*.npy"):
iter = int(op.splitext(op.basename(f))[0].split("_")[2])
if (iter > st):
st = iter
if st > 0:
params_file = "saved_params_%d.npy" % st
state_file = "saved_state_%d.pickle" % st
params = np.load(params_file)
with open(state_file, "rb") as f:
state = pickle.load(f)
return st, params, state
else:
return st, None, None
def save_params(iter, params):
params_file = "saved_params_%d.npy" % iter
np.save(params_file, params)
with open("saved_state_%d.pickle" % iter, "wb") as f:
pickle.dump(random.getstate(), f)
def sgd(f, x0, step, iterations, postprocessing=None, useSaved=False,
PRINT_EVERY=10):
""" Stochastic Gradient Descent
Implement the stochastic gradient descent method in this function.
Arguments:
f -- the function to optimize, it should take a single
argument and yield two outputs, a loss and the gradient
with respect to the arguments
x0 -- the initial point to start SGD from
step -- the step size for SGD
iterations -- total iterations to run SGD for
postprocessing -- postprocessing function for the parameters
if necessary. In the case of word2vec we will need to
normalize the word vectors to have unit length.
PRINT_EVERY -- specifies how many iterations to output loss
Return:
x -- the parameter value after SGD finishes
"""
# Anneal learning rate every several iterations
ANNEAL_EVERY = 20000
if useSaved:
start_iter, oldx, state = load_saved_params()
if start_iter > 0:
x0 = oldx
step *= 0.5 ** (start_iter / ANNEAL_EVERY)
if state:
random.setstate(state)
else:
start_iter = 0
x = x0
if not postprocessing:
postprocessing = lambda x: x
exploss = None
for iter in range(start_iter + 1, iterations + 1):
# You might want to print the progress every few iterations.
loss = None
### YOUR CODE HERE
loss, grad = f(x)
x -= step * grad
### END YOUR CODE
x = postprocessing(x)
if iter % PRINT_EVERY == 0:
if not exploss:
exploss = loss
else:
exploss = .95 * exploss + .05 * loss
print("iter %d: %f" % (iter, exploss))
if iter % SAVE_PARAMS_EVERY == 0 and useSaved:
save_params(iter, x)
if iter % ANNEAL_EVERY == 0:
step *= 0.5
return x
def sanity_check():
quad = lambda x: (np.sum(x ** 2), x * 2)
print("Running sanity checks...")
t1 = sgd(quad, 0.5, 0.01, 1000, PRINT_EVERY=100)
print("test 1 result:", t1)
assert abs(t1) <= 1e-6
t2 = sgd(quad, 0.0, 0.01, 1000, PRINT_EVERY=100)
print("test 2 result:", t2)
assert abs(t2) <= 1e-6
t3 = sgd(quad, -1.5, 0.01, 1000, PRINT_EVERY=100)
print("test 3 result:", t3)
assert abs(t3) <= 1e-6
print("-" * 40)
print("ALL TESTS PASSED")
print("-" * 40)
if __name__ == "__main__":
sanity_check()
| [
"bym0313@dgist.ac.kr"
] | bym0313@dgist.ac.kr |
d2aaff5adf1ae65ce0bd9633a7c3a6ac774391e4 | f77ceeba8b499be7886dca264108688f2acbe11c | /lstm.py | 4db31ed38a2681666ed3f8f2af658fea0b2207c8 | [] | no_license | TATlong/Research-report-Classification-system | 066960be9b340537968866b83611e335898b3024 | 4bbe39964cc87898f7ef1b87b05bc02129d1a4b2 | refs/heads/master | 2021-12-21T11:34:33.765297 | 2021-12-10T08:55:13 | 2021-12-10T08:55:13 | 157,339,220 | 100 | 46 | null | null | null | null | UTF-8 | Python | false | false | 8,112 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 30 22:28:41 2018
@author: lilong
"""
from interface import Interface_base
import numpy as np
import pandas as pd
import sys,os
import yaml
from sklearn.cross_validation import train_test_split
from gensim.models.word2vec import Word2Vec
from gensim.corpora.dictionary import Dictionary
from keras.utils import np_utils
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.layers.core import Dense, Dropout,Activation
from keras.models import model_from_yaml
from keras.layers import Flatten
from sklearn.preprocessing import LabelEncoder
class Lstm_nn(Interface_base):
def __init__(self): # 初始化父类
Interface_base.__init__(self)
# 该函数初步处理训练数据
def splice_data(self,path):
sp=np.array([]) # 读取正面样本
pathdir = os.listdir(path)
for pf in pathdir:
newdir = os.path.join(path, pf) # 获取的文件路径
print('newdir:',newdir)
# 之这里是‘gbk’的编码,因为人工分类的文本保存用的windows系统
# 如果人工分类时是mac,那就改成utf-8的编码
with open(newdir, "r", encoding='gbk') as f:
tmp=''
lines = f.readlines()
for line in lines:
line=line.strip()
line.replace(' ', '')
tmp=tmp+line
sp=np.append(sp,tmp)
return sp
# 拼接训练文件
def load_train_file(self):
pos=self.splice_data(self.pos_path) # 拼接正样本
neu=self.splice_data(self.neu_path) # 拼接负样本
neg=self.splice_data(self.neg_path) # 拼接负样本
combined=np.concatenate((pos,neu,neg)) # 正和负样本文本的拼接
pos_array = np.array([-1]*len(pos),dtype=int)
neu_array = np.array([0]*len(neu),dtype=int)
neg_array = np.array([1]*len(neg),dtype=int)
y = np.concatenate((pos_array, neu_array,neg_array)) # 正、中性、fu2标签的拼接
print(len(y))
return combined,y
# 得到每篇文本在词典中的索引列表,不同的文本长度不同,所以列表长度也不同
def parse_dataset(self,combined):
w2indx=np.load(self.word_index)
w2indx=w2indx['dic'][()] # 必须这种形式读取保存的字典
w2vec=np.load(self.word_vec)
w2vec=w2vec['dic'][()]
data=[]
for text in combined:
new_txt = []
for word in text:
try:
new_txt.append(w2indx[word])
except:
new_txt.append(0)
data.append(new_txt)
#print(len(data[0]),len(data[1]))
return w2indx,w2vec,data
# lstm模型训练数据的结构化
def train_data_struc(self,combined):
w2indx,w2vec,struc_w2index=self.parse_dataset(combined) # 在这里是不等长的数列
# 得到每篇文本所含的词语对应的索引:后端截断并且填0补充
struc_w2index= sequence.pad_sequences(struc_w2index, maxlen=self.maxlen,padding='post',truncating='post')
return w2indx,w2vec,struc_w2index
# index_dict:所有的词索引列表(词:索引), word_vectors:所有词的词向量, combined:所有文本的索引值
def get_train_data(self,word_index,word_vectors,struc_w2index,y):
n_symbols = len(word_index) + 1 # 词典的大小
embedding_weights = np.zeros((n_symbols, self.vocab_dim)) # 索引为0的词语,词向量全为0
# 从索引为1的词语开始,每个词语对应其词向量形成词向量矩阵
for word, index in word_index.items():
embedding_weights[index, :] = word_vectors[word]
#print('embedding_weights:',embedding_weights[:2])
print(len(struc_w2index),len(y))
x_train, x_test, y_train, y_test = train_test_split(struc_w2index, y, test_size=self.test_size)
#print(y_train, y_test)
# 分类标签-1,0,1转化为0,1,2
encoder = LabelEncoder()
encoded_y_train = encoder.fit_transform(y_train)
encoded_y_test = encoder.fit_transform(y_test)
#print(encoded_y_train,encoded_y_test)
# one-hot编码:-1=0=[1. 0. 0]; 0=1=[0. 1. 0.]; 1=2=[0. 0. 1.]
y_train = np_utils.to_categorical(encoded_y_train)
y_test = np_utils.to_categorical(encoded_y_test)
#print (y_train,y_test)
return n_symbols,embedding_weights,x_train,y_train,x_test,y_test
# 定义网络结构
def train_lstm(self,n_symbols,embedding_weights,x_train,y_train,x_test,y_test):
nb_classes=3
print ('Defining a Simple Keras Model...')
model = Sequential()
model.add(Embedding(output_dim=self.vocab_dim, # 每个词的词向量维度
input_dim=n_symbols, # 所有的词的长度加1
mask_zero=True, # 确定是否将输入中的‘0’看作是应该被忽略的‘填充’(padding)值
weights=[embedding_weights], # 词向量矩阵
input_length=self.input_length)) # 当输入序列的长度固定时,该值为其长度
'''二分类
### keras层的参数设置
model.add(LSTM(output_dim=50, activation='sigmoid', inner_activation='hard_sigmoid'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
print ('Compiling the Model...')
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
'''
# 三分类
## 使用单层LSTM 输出的向量维度是50,输入的向量维度是vocab_dim,激活函数relu
model.add(LSTM(output_dim=50, activation='relu', inner_activation='hard_sigmoid'))
model.add(Dropout(0.5))
## 在这里外接softmax,进行最后的3分类
model.add(Dense(output_dim=nb_classes, input_dim=50, activation='softmax'))
# 开始训练
print ('Compiling the Model...')
model.compile(loss='categorical_crossentropy',optimizer='adam',metrics=['accuracy'])
print ("Train...")
model.fit(x_train, y_train, batch_size=self.batch_size, epochs=self.n_epoch,\
verbose=1, validation_data=(x_test, y_test))
print ("Evaluate...")
score = model.evaluate(x_test, y_test,batch_size=self.batch_size)
yaml_string = model.to_yaml()
with open(self.lstm_model, 'w') as outfile:
outfile.write( yaml.dump(yaml_string, default_flow_style=True) )
model.save_weights(self.lstm_weight)
print ('Test score:', score)
# 训练模型,并保存
def train(self):
print ('Loading train Data...')
combined,y=self.load_train_file() # combined是正、中性、负样本,y是标签
print ('Tokenising...')
combined = self.tokenizer(combined) #tokenizer()是分词并处理空格的函数
print(len(combined))
print ('Training a Word2vec model...')
w2indx,w2vec,struc_w2index=self.train_data_struc(combined)
print ('Setting up Arrays for Keras Embedding Layer...')
n_symbols,embedding_weights,x_train,y_train,x_test,y_test=self.get_train_data(w2indx,w2vec,struc_w2index,y)
print (x_train.shape,y_train.shape)
self.train_lstm(n_symbols,embedding_weights,x_train,y_train,x_test,y_test)
'''
mm=Lstm_nn()
mm.train()
'''
| [
"34054731+TATlong@users.noreply.github.com"
] | 34054731+TATlong@users.noreply.github.com |
3cc4baa6ce409ef2fef25d43ae16372d88412de4 | 25692e58dceec1f5be4c7930d353bacafd3ff7b0 | /binary/랜선.py | 428c52c11757090ed3d5b84ea1660cc38c993943 | [] | no_license | ub1n/Algorithm | a8617fc56d934e99370c367af364f308431423d6 | c9761941082b678a2882d04db8887afb0d664737 | refs/heads/master | 2023-06-11T11:11:52.573748 | 2021-07-02T13:32:09 | 2021-07-02T13:32:09 | 375,415,927 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | import sys
n=list(map(int,sys.stdin.readline().split()))
arr=[]
for i in range(n[0]):
m=int(sys.stdin.readline())
arr.append(m)
start=1
end=max(arr)
ans=[]
while(start<=end):
mid=(start+end)//2
temp=sum([i//mid for i in arr])
if temp>=n[1]:
ans.append(mid)
start=mid+1
else:
end=mid-1
print(max(ans)) | [
"bin951024@naver.com"
] | bin951024@naver.com |
f6969149986c94f6addf9e40a89a24a01d513ec8 | 84bcda4ff3a1c2c956c7814f3a308ba68d697563 | /python/GETDownload1.py | 3954d7b356b4b32c016bd413695f85aa213f5bf1 | [] | no_license | yijiyouyu/code | 7a9db849d3734169ba80f029ca74d6962ecd71b9 | f4bc6a4124243484c2d17fb3a574da5e7a31ca11 | refs/heads/master | 2021-09-17T22:54:13.967963 | 2018-07-06T08:26:17 | 2018-07-06T08:26:17 | 109,633,819 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | #coding:utf-8
from requests import get
from time import time
import sys
def Usage():
print '[Usage]:\nGETDownload1.py [URL]'
def getHTML(URL):
html = get(URL).text
return html
def getName():
name = str(time())
return name
def saveFile(Fname,Fdata):
f = open(Fname,'w')
f.write(Fdata)
f.close()
if __name__=='__main__':
try:
reload(sys)
sys.setdefaultencoding('utf8')
URL = sys.argv[1]
html = getHTML(URL)
name = getName()
saveFile(name+'.txt',html)
except:
Usage() | [
"1147121947@qq.com"
] | 1147121947@qq.com |
8f0dd18ff0e2846a87a5f2ca82b2163c648938b6 | 2479345dafbf0ac1118f34fbd3471871a3ac5c11 | /demo/libdemo/list_countries.py | 9292611d6422dfbe06ee3e2c9b7058f6e10a215d | [] | no_license | srikanthpragada/PYTHON_06_MAY_2021 | e2fc4d32a38f085658f87d35f31df65ee837a440 | f30a3c4541e0fc15d157446721b514f791602919 | refs/heads/master | 2023-06-02T23:13:53.786444 | 2021-06-16T03:00:38 | 2021-06-16T03:00:38 | 365,402,518 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | import requests
resp = requests.get("https://restcountries.eu/rest/v2/all")
if resp.status_code != 200:
print('Sorry! Could not get details!')
exit(1)
countries = resp.json()
for c in countries:
print(f"{c['name']:50} - {c['capital']}") | [
"srikanthpragada@gmail.com"
] | srikanthpragada@gmail.com |
0cc1a592e15de782740aa4548d5a1da9c94b242a | 21b7670ce56d6cb41f609a09f26f460150cbbb29 | /scripts/antennaset.py | 773119bd18e95b90ea6eb507dfb468615b349e51 | [] | no_license | transientskp/old-aartfaac-imaging-pipeline | fd82c739b9b2670e3b2f6cf05f97f2ea168800e6 | 64456796a56cf5e667170e6336dbdcf9cd07f9ba | refs/heads/master | 2022-07-07T08:48:36.192471 | 2016-06-16T08:47:06 | 2016-06-16T08:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,250 | py | #!/usr/bin/env python
# Generate position files for different antennasets.
#
# The imaging pipeline needs to know the position of the AARTFAAC antennae.
#
# The LOFAR repository contains a per-station AntennaFieldCSXXX.conf file in
# the directory MAC/Deployment/data/StaticMetaData/AntennaFields. These
# provide information about the position of all LOFAR antennae. In particular,
# they contain a block that looks like:
#
# LBA
# 3 [ XXXXX.XXXXX YYYYY.YYYYY ZZZZZ.ZZZZZ]
# 96 x 2 x 3 [
# X.XXXX Y.YYYY Z.ZZZZ X.XXXX Y.YYYY Z.ZZZZ
# X.XXXX Y.YYYY Z.ZZZZ X.XXXX Y.YYYY Z.ZZZZ
# ...
# X.XXXX Y.YYYY Z.ZZZZ X.XXXX Y.YYYY Z.ZZZZ
# ]
#
# This tells us about all the LBA antennae in the station. The first three
# numbers provide the reference position of the station in IRTF2005. The
# subsequent array of 96 * 2 * 3 numbers provide per-antenna offsets from that
# reference. Each offset is repeated twice, for two polarizations, but the
# positions should be identical.
#
# Note that there are 96 antennae listed. The first 48 correspond to the
# LBA_INNER antennaset; the second 48 to LBA_OUTER. This is defined in
# MAC/Deployment/data/StaticMetaData/AntennaSets.conf; we take it as read for
# now.
#
# When the AARTFAAC correlator produces correlation matrices, it will order
# them such that we start with the first antenna being used in the CS002 file,
# and end with the last antenna in the CS007 file.
#
# The imaging pipeline requires a text file that lists a single IRTF2005
# X/Y/Z position per line. They should be ordered in the same way as the
# correlator output. That is, the first line contains the ITRF position of the
# first CS002 antenna in use, and the last line contains the position of the
# last CS007 antenna in use.
#
# This script processes the AntennaFieldCSXXX.conf files to generate output
# appropriate for AARTFAAC. Specify the type of antenna (LBA, HBA) and the
# range in use (0-48 for LBA_INNER, 48-96 for LBA_OUTER) on the command line,
# together with one or more AntennaField files. E.g.:
#
# $ python antennaset.py LBA 0 48 AntennaFieldCS002.conf AntennaFieldCS003.conf
import sys
class AntennaSet(object):
    """Absolute antenna positions for one station, read from an AntennaField file.

    The file block starting at `name` (e.g. "LBA") provides a station
    reference position followed by per-antenna offsets; `positions` holds
    the reference + offset sums for antennae start_ant..end_ant-1.
    """

    def __init__(self, name, start_ant, end_ant, datafile):
        self.positions = []
        stripped = [raw.strip() for raw in datafile.readlines()]
        header_idx = stripped.index(name)
        # The line right after the name holds "3 [ X Y Z ]": tokens 2..4 are
        # the station reference position.
        reference = [float(tok) for tok in stripped[header_idx + 1].split()[2:5]]
        # Per-antenna offset rows begin two lines further down.
        first_row = header_idx + 3
        for entry in stripped[first_row + start_ant:first_row + end_ant]:
            dx, dy, dz = (float(tok) for tok in entry.split()[0:3])
            self.positions.append(
                [reference[0] + dx, reference[1] + dy, reference[2] + dz]
            )
if __name__ == "__main__":
    name = sys.argv[1]  # antenna type: LBA or HBA
    # Antenna index range in use, e.g. LBA_INNER = 0 48, LBA_OUTER = 48 96.
    start_ant, end_ant = [int(x) for x in sys.argv[2:4]]
    antennasets = []
    # Remaining arguments are AntennaField files (one per station).
    for filename in sys.argv[4:]:
        with open(filename, "r") as f:
            antennasets.append(AntennaSet(name, start_ant, end_ant, f))
    for antset in antennasets:
        for posn in antset.positions:
            # Use a print() call instead of the Python 2 print statement so
            # the script runs under both Python 2 and Python 3.
            print("%f %f %f" % (posn[0], posn[1], posn[2]))
| [
"swinbank@transientskp.org"
] | swinbank@transientskp.org |
566a439b70fad999ee6c115c070e521142d7015a | ba5d4704dd8be5a17890cce41e8ac5e7523472ed | /archives/tests/test_model_domains.py | 166be7ae9a1cee70ef246aa5215239deff9c71c6 | [] | no_license | carogiu/cell-migration | 0fb0fdf0bff6ac5cec6cebcb60ef868ac6436574 | 0c90e14e426dfc1faa08ebba22487711dc199cf7 | refs/heads/master | 2020-12-01T11:25:18.023077 | 2020-08-10T09:28:54 | 2020-08-10T09:28:54 | 230,616,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | import unittest
import dolfin
from model.main import mesh_from_dim
from model.model_domains import BD_right, BD_left, BD_top_bottom, dom_and_bound
class TestModelDomains(unittest.TestCase):
    """Unit tests for the boundary-domain classes and the mesh/domain setup."""

    def test_class_BD(self):
        # Each boundary class keeps the dimension it was constructed with.
        right = BD_right(dim_x=2)
        self.assertIsInstance(right, BD_right)
        self.assertEqual(right.dim_x, 2)

        left = BD_left(dim_x=2)
        self.assertIsInstance(left, BD_left)
        self.assertEqual(left.dim_x, 2)

        top_bottom = BD_top_bottom(dim_y=2)
        self.assertIsInstance(top_bottom, BD_top_bottom)
        self.assertEqual(top_bottom.dim_y, 2)

    def test_class_BD_inside(self):
        boundary = BD_right(dim_x=2)
        # A point only counts as inside when flagged as on the boundary ...
        self.assertEqual(boundary.inside(x=[1+1e-13], on_boundary=False), False)
        self.assertEqual(boundary.inside(x=[1+1e-13], on_boundary=True), True)
        self.assertEqual(boundary.inside(x=[1+1e-12], on_boundary=True), True)
        # ... and within the edge tolerance (1e-11 is already too far out).
        self.assertEqual(boundary.inside(x=[1+1e-11], on_boundary=True), False)

    def test_mesh_definition(self):
        mesh = mesh_from_dim(nx=100, ny=100, dim_x=10, dim_y=10)
        self.assertIsInstance(mesh, dolfin.RectangleMesh)
        domain, boundaries = dom_and_bound(mesh, dim_x=10, dim_y=10)
if __name__ == '__main__':
    # Allow running this test module directly (python test_model_domains.py).
    unittest.main()
| [
"57912591+carogiu@users.noreply.github.com"
] | 57912591+carogiu@users.noreply.github.com |
be92341808644115b777719c2a4432641c542798 | 93211b441515263dce08cc01d98b4b42806d31dd | /kinship_analysis_allelic_dropout_dicts.py | eb8e9401f11728d70bc875fccd044a847aba8a1f | [
"MIT"
] | permissive | EdaEhler/Kinship_analysis | 24a15b845013e918d9f1090a2d7f7c8ddd87dbf2 | d64e53f1b3185d8b7f4c92bd095684337da36031 | refs/heads/master | 2021-01-13T08:57:31.774876 | 2016-09-25T10:03:32 | 2016-09-25T10:03:32 | 69,156,569 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,337 | py | ###############################
# Kinship analysis using SNPs #
# --------------------------- #
# Edvard Ehler, Ph.D., #
# Institute of Anthropology, #
# UAM Poznan, Poland, #
# 2016 #
# eda.ehler@seznam.cz #
###############################
# Based on Fung&Hu (2008) Statistical DNA Forensics: Theory, Methods and Computation
"""
For two persons X and Y, define the relatedness coefficients (k 0 ,2k 1 ,k 2 ) as
k0 = P (neither allele of X is identical by descent to alleles of Y);
k1 = P (one or the other of the alleles of X is ibd to one of the alleles of Y, but the second allele is not);
k2 = P (both alleles of X are ibd to those of Y).
"""
#--------------------
# IMPORTS
#--------
from collections import namedtuple # used for the relatedness-coefficient triples
from math import pow # roughly 15% faster than the built-in pow
from random import random
# VARIABLES
#----------
# Names of all SNPs read from SNP_INFO, in file order.
snp_list = []
# e.g. {'rs112' : {"G":0.3, "T":0, "C":0.7, "A":0}, 'rs2341': {"G":0.2, "T":0.8, "C":0, "A":0}}
freq_dict = {}
# table 3.13, chapter 3.6, page 43
relationship = namedtuple("rel_coefficients", "k0, k1, k2")
#----
parentChild = relationship(0, 1, 0)
fullSiblings = relationship(0.25, 0.5, 0.25)
halfSiblings = relationship(0.5, 0.5, 0)
grandparentGrandchild = relationship(0.5, 0.5, 0)
uncleNephew = relationship(0.5, 0.5, 0)
firstCousins = relationship(0.75, 0.25, 0)
secondCousins = relationship(0.9375, 0.0625, 0)
unrelated = relationship(1, 0, 0)
SNP_INFO = "SNP_allele_freqs.csv"
SAMPLES_GENOTYPES = "3samples_genotypes.csv"
# per Anna - P(false positive) of homozygotes;
# every homozygote in divide_et_impera is tested for being a false positive,
# and if it is, it is treated as a heterozygote instead
ALLELIC_DROPOUT = 0.00159
ALLELIC_DROPOUT_PROBS = "3samples_allelic_dropouts.csv"
# genotype dicts, one per sample ({snp_name: genotype string})
kz1 = {}
kz5 = {}
kz4 = {}
# per-sample, per-SNP P(false homozygote) dicts (= allelic drop-out = ado)
kz1_ado = {}
kz5_ado = {}
kz4_ado = {}
# module-level counters updated by divide_et_impera / run_kinship_analysis
snp_counter_nonzero = 0
snp_counter = 0
#-----------------
# LOADING SNP info + allel frequencies + samples genotypes
#-------------------------------------
with open(SNP_INFO, mode="r", encoding="utf-8") as snpIN:
    # Read into two structures: a list of SNP names and a dict of their allele frequencies.
    for radek in snpIN:
        radek = radek.strip().split(";")
        # the name goes to snp_list so we can iterate over the SNPs later
        snp_list.append(radek[0])
        # allele frequencies go into freq_dict,
        # starting from all-zero default values
        freq_dict[radek[0]] = {"G":0, "T":0, "C":0, "A":0}
        freq_dict[radek[0]][radek[1]] = radek[3]
        freq_dict[radek[0]][radek[2]] = radek[4]
with open(SAMPLES_GENOTYPES, mode="r", encoding="utf-8") as genoIN:
    # load the genotypes of the samples
    for radek in genoIN:
        # skip commented-out lines and N (no-call) genotypes
        if not radek.startswith("#"):
            if not "N" in radek:
                radek = radek.strip().split(";")
                #h2[radek[0]] = radek[1]
                #h4[radek[0]] = radek[2]
                kz1[radek[0]] = radek[1]
                kz5[radek[0]] = radek[2]
                kz4[radek[0]] = radek[3]
with open(ALLELIC_DROPOUT_PROBS, mode="r", encoding="utf-8") as adIN:
    # load the per-SNP allelic drop-out probabilities of the samples
    for radek in adIN:
        # skip commented-out lines and N (no-call) rows
        if not radek.startswith("#"):
            if not "N" in radek:
                radek = radek.strip().split(";")
                #h2[radek[0]] = radek[1]
                #h4[radek[0]] = radek[2]
                kz1_ado[radek[0]] = float(radek[1])
                kz5_ado[radek[0]] = float(radek[2])
                kz4_ado[radek[0]] = float(radek[3])
# FUNCTIONS
#----------
def divide_et_impera(snp, genotype1, genotype2, alleleCount, scenario=parentChild, jmeno1="prvni", jmeno2="druhy"):
    """Likelihood ratio for one SNP: P(pair | scenario) / P(pair | unrelated).

    snp -- name of SNP, string ('rs12345')
    genotype1 -- first individual's genotype, string ("CC")
    genotype2 -- second individual's genotype, string ("AC")
    alleleCount -- number of alleles across all loci in the 2 individuals tested, int (124)
    scenario -- relationship namedtuple with relatedness coefficients
                (k0, k1, k2); the k1 field actually holds 2*k1 (see header)
    jmeno1, jmeno2 -- sample names that select the per-SNP allelic drop-out
                      dictionaries (jmeno1: KZ1/KZ4, jmeno2: KZ4/KZ5)

    Side effects: increments the module-level counters snp_counter and
    snp_counter_nonzero.  Homozygous calls are randomly re-interpreted as
    heterozygotes with the per-SNP allelic drop-out probability.
    """
    global snp_counter, snp_counter_nonzero
    #------------
    # Normalise allele order ("CA" -> "AC") so equivalent heterozygote
    # spellings compare equal; previously a pair like ("AB", "BA") matched no
    # branch below and crashed with an UnboundLocalError on `allele1`.
    genotype1 = "".join(sorted(genotype1))
    genotype2 = "".join(sorted(genotype2))
    # Select the allelic drop-out (ado) dictionaries from the sample names.
    ado1 = {}
    ado2 = {}
    if jmeno1.upper() == "KZ1":
        ado1 = kz1_ado
    elif jmeno1.upper() == "KZ4":
        ado1 = kz4_ado
    else:
        print("jmeno1:", jmeno1)
        raise NameError("jmeno1 has unknown value (not KZ1, KZ4).")
    if jmeno2.upper() == "KZ4":
        ado2 = kz4_ado
    elif jmeno2.upper() == "KZ5":
        ado2 = kz5_ado
    else:
        print("jmeno2:", jmeno2)
        raise NameError("jmeno2 has unknown value (not KZ4, KZ5).")
    #-------------------
    # Debug label of the branch taken below.
    branch = ""
    #-------------------
    # Dispatch on the genotype pattern:
    # AA, AA
    if (genotype1 == genotype2) and (genotype1[0] == genotype1[1]):
        branch = "aaaa"
        allele1 = genotype1[0]
        allele2 = genotype1[0]
        # Randomly re-interpret false-positive homozygotes (allelic drop-out).
        drop_out_roll = random()
        if drop_out_roll <= ado1[snp] * ado2[snp]:  # both are false positives
            funkce = ab_ab
        elif drop_out_roll <= ado1[snp]:  # the first one is a false positive
            funkce = aa_ab
        elif drop_out_roll <= ado2[snp]:  # the second one is a false positive
            funkce = aa_ab
        else:
            funkce = aa_aa
    # AB, AB
    elif (genotype1 == genotype2) and (genotype1[0] != genotype1[1]):
        branch = "abab"
        allele1 = genotype1[0]
        allele2 = genotype1[1]
        # Heterozygotes cannot both drop out in opposite directions, so only
        # single-sample drop-outs are modelled here.
        drop_out_roll = random()
        if drop_out_roll <= ado1[snp]:  # the first one is a false positive
            funkce = aa_ab
        elif drop_out_roll <= ado2[snp]:  # the second one is a false positive
            funkce = aa_ab
        else:
            funkce = ab_ab
    # AA, BB
    elif (genotype1 != genotype2) and (genotype1[0] == genotype1[1]) and (genotype2[0] == genotype2[1]):
        branch = "aabb"
        allele1 = genotype1[0]
        allele2 = genotype2[0]
        drop_out_roll = random()
        if drop_out_roll <= ado1[snp] * ado2[snp]:  # both are false positives
            funkce = ab_ab
        elif drop_out_roll <= ado1[snp]:  # the first one is a false positive
            funkce = aa_ab
        elif drop_out_roll <= ado2[snp]:  # the second one is a false positive
            funkce = aa_ab
        else:
            funkce = aa_bb
    # AA, AB
    elif (genotype1 != genotype2) and (genotype1[0] == genotype1[1]) and (genotype2[0] != genotype2[1]):
        branch = "aaab"
        allele1 = genotype1[0]
        # pick the allele of genotype2 that differs from the homozygote
        allele2 = genotype2[1] if genotype2[1] != genotype1[0] else genotype2[0]
        # Only the homozygous sample can be a false positive here.
        drop_out_roll = random()
        if drop_out_roll <= ado1[snp]:  # the first one is a false positive
            funkce = ab_ab
        else:
            funkce = aa_ab
    # AB, AA
    elif (genotype1 != genotype2) and (genotype1[0] != genotype1[1]) and (genotype2[0] == genotype2[1]):
        branch = "abaa"
        allele1 = genotype2[0]
        # pick the allele of genotype1 that differs from the homozygote
        allele2 = genotype1[1] if genotype1[1] != genotype2[0] else genotype1[0]
        drop_out_roll = random()
        if drop_out_roll <= ado2[snp]:  # the second one is a false positive
            funkce = ab_ab
        else:
            funkce = aa_ab
    else:
        # After normalisation this can only happen with two different
        # heterozygotes, i.e. more than two alleles at the locus, which this
        # bi-allelic model does not support.
        raise ValueError("unsupported genotype pair {}/{} at {}".format(genotype1, genotype2, snp))
    # Allele frequencies in the reference population (taken from Ensembl, GRCh37).
    f1 = float(freq_dict[snp][allele1])
    f2 = float(freq_dict[snp][allele2])
    likelihoodRatio = funkce(f1, f2, koef=scenario) / funkce(f1, f2, koef=unrelated)
    snp_counter += 1
    if likelihoodRatio == 0:
        # Happens mainly under the parent-child scenario when the genotypes do
        # not match.  Mutations or silent alleles (Pinto et al. 2013,
        # FSI:Genetics; Borsting et al. 2011, FSI:Genetics) could be modelled
        # here, e.g. as 0.000001 * (1 / (alleleCount + 1)); for now return 0.
        return 0
    else:
        snp_counter_nonzero += 1
        return likelihoodRatio
# Fung&Hu 2008, table 5.1, page 80
# Functions computing the joint genotype probability under the assumption of HWE.
# Inputs: the frequencies of allele 1 and allele 2 (f1, f2) and the relatedness
# coefficients of the scenario in use.  The output is P(Z|Y, H) - the probability
# that genotypes Z and Y carry alleles identical-by-descent (ibd) under the
# hypothesis (scenario) H (e.g. siblings, unrelated, uncle-nephew, ...).
def aa_aa(f1, f2, koef):
    """P(X=AA, Y=AA | koef) under HWE; f2 is unused for this pattern."""
    terms = (koef.k0 * pow(f1, 4), koef.k1 * pow(f1, 3), koef.k2 * pow(f1, 2))
    return sum(terms)
def aa_ab(f1, f2, koef):
    """P(X=AA, Y=AB | koef) under HWE."""
    homo = pow(f1, 2)
    unrelated_term = 2 * koef.k0 * pow(f1, 3) * f2
    return unrelated_term + koef.k1 * homo * f2
def aa_bb(f1, f2, koef):
    """P(X=AA, Y=BB | koef) under HWE; only the k0 (no-ibd) term contributes."""
    pa = pow(f1, 2)
    pb = pow(f2, 2)
    return koef.k0 * pa * pb
def ab_ab(f1, f2, koef):
    """P(X=AB, Y=AB | koef) under HWE; symmetric in f1 and f2."""
    no_ibd = 4 * koef.k0 * pow(f1, 2) * pow(f2, 2)
    one_ibd_a = koef.k1 * pow(f1, 2) * f2
    one_ibd_b = koef.k1 * f1 * pow(f2, 2)
    two_ibd = 2 * koef.k2 * f1 * f2
    return no_ibd + one_ibd_a + one_ibd_b + two_ibd
#---------------------
def allele_count(sample1, sample2):
    """Count alleles over all loci of the two genotype dicts.

    A locus contributes 1 when both samples carry the same homozygous
    genotype, otherwise 2; loci with three or more alleles are disregarded
    (only bi-allelic markers are expected).
    """
    total = 0
    for locus, first in sample1.items():
        second = sample2[locus]
        same_homozygote = first == second and first[0] == first[1]
        total += 1 if same_homozygote else 2
    return total
#---------------------
# SKRIPT
#-------
# kz4 vs kz5
def run_kinship_analysis(sample1, sample2, hypothesis, hypothesis_name, alleleCount, name1='prvni', name2='druhy', repeats=100):
    """Compute and print the likelihood ratio for one kinship hypothesis.

    sample1, sample2 -- genotype dicts {snp_name: genotype}
    hypothesis -- relationship namedtuple (k0, k1, k2)
    hypothesis_name -- label used in the printed report
    alleleCount -- total allele count, forwarded to divide_et_impera
    name1, name2 -- sample names that select the allelic drop-out dicts
    repeats -- number of Monte-Carlo repetitions to average over

    Because divide_et_impera rolls random allelic drop-outs, the whole
    per-SNP product is recomputed `repeats` times and the mean LR is
    reported.  Resets and reuses the module-level SNP counters.
    """
    global snp_counter, snp_counter_nonzero
    snp_counter = 0
    snp_counter_nonzero = 0
    result = 1
    result_list = []
    # Monte-Carlo repetitions
    #--------------
    for _ in range(repeats):
        result = 1 # reset the accumulated product for each round
        for i in sample1:
            #print(i, 'result = ', result)
            try:
                result *= divide_et_impera(i, sample1[i], sample2[i], alleleCount, scenario=hypothesis, jmeno1=name1, jmeno2=name2)
            except IndexError:
                #print(i, kz1[i])
                #print(i, kz5[i])
                # NOTE(review): IndexError is silently skipped here —
                # presumably guards malformed genotype strings; verify.
                pass
        #print("--")
        result_list.append(result)
    #--------------
    # average the result over all repetitions
    result = sum(result_list)/repeats
    print(name1 + " vs. " + name2)
    print("Scenario:", hypothesis_name + ",", hypothesis)
    print("Likelihood Ratio (p(scenario)/p(unrelated)):", result)
    print("Bayes. estimate of probability of the scenario (prior probability = 0.5):", str(round((result/(result + 1))*100, 5)) + "%")
    print("SNPs tried:", snp_counter/repeats)
    print("SNPs with non-zero result:", snp_counter_nonzero/repeats)
    print("----------------------------------------------------")
    print()
    #input()
# Every kinship scenario to test, paired with its human-readable label.
scenarios_bag = (parentChild, fullSiblings, halfSiblings, grandparentGrandchild, uncleNephew, firstCousins, secondCousins)
scenarios_names = ('parent-child', 'full-siblings', 'half-siblings', 'grandparent-grandchild', 'uncle-nephew', 'first cousins', 'second cousins')
#----------
# Compare samples KZ4 and KZ5 under each scenario, averaging 10000 runs.
pocet_alel = allele_count(kz4, kz5)
print("Allele count:", pocet_alel)
for n, hypo in enumerate(scenarios_bag):
    run_kinship_analysis(kz4, kz5, hypo, scenarios_names[n], pocet_alel, name1='KZ4', name2='KZ5', repeats=10000)
    #input()
print("============================================")
print("Algorithm loops count: 10000")
print("Allelic drop-out check - using dictionary of P(false allele) unique for each SNP for each sample.")
print("No silent-allele correction, just return 0 in case of opposite homozygotes with no drop-out.")
print("********************************************")
| [
"noreply@github.com"
] | noreply@github.com |
fff7ecd42a575a75bd8c70fd7c301b8cd7a6cf9c | 6b989e9ed854c9c8a04fdcf3e9df7ad8922cf856 | /chapter01/python3_str_types.py | d8d64e34ee572126a61d5e7817be131ea8f13b09 | [
"MIT"
] | permissive | PacktPublishing/Hands-On-Enterprise-Application-Development-with-Python | 6fea9321392328648a094bd10787a4cdb873a6b6 | a59c2ecb55ed43e5bad8c6ed9b687a3e6b610e9f | refs/heads/master | 2023-02-26T11:33:14.202552 | 2023-01-30T08:51:15 | 2023-01-30T08:51:15 | 140,812,464 | 38 | 30 | MIT | 2023-02-15T20:26:05 | 2018-07-13T07:24:55 | Python | UTF-8 | Python | false | false | 253 | py | #!/bin/python3
# Demonstrates that Python 3 keeps text (str) and binary data (bytes) distinct.
str1 = 'I am a unicode string'
print("Type of str1 is " + str(type(str1)))
str2 = b"And I can't be concatenated to a byte string"
print("Type of str2 is " + str(type(str2)))
print("Trying to concatenate str1 and str2")
# Deliberately raises TypeError: str and bytes cannot be concatenated in Python 3.
str3 = str1 + str2
| [
"sbadhwar@redhat.com"
] | sbadhwar@redhat.com |
e3ef029dcbb4f626217414ae65caf8a028b4de89 | 4a887a050564267fc26f9ccd318aa9acc1dd8bf9 | /WikigenderJsonParsing/CreateBootstrappedDatasets.py | c6dce349d732458e9bd06a06d6485aaa66bd7fc7 | [] | no_license | kp1302/Towards-Understanding-Gender-Bias-in-Neural-Relation-Extraction | e998ecab6a9aa233c7f63816546afca3af8d3993 | e3c243c2b50b21ae0b8dc12732dbf4448f545918 | refs/heads/master | 2022-12-19T06:53:14.521785 | 2020-07-06T06:05:14 | 2020-07-06T06:05:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,955 | py | from Utility import *
import argparse
import os
import random
#from sklearn.utils import resample
WORD_EMBEDDING_DIRECTORY = '../WordEmbeddings/'
BOOTSTRAP_FRACTION = 0.9
def createBootstrappedDataset(dataset_name, args):
    '''
    Create (and persist) a possibly-bootstrapped copy of the named dataset.

    :param dataset_name: path of the original JSON dataset (without any
        debiasing suffix), e.g. 'JsonData/Wikigender.json'
    :param args: parsed command-line namespace; getNameSuffix(args) selects
        which debiased variant to read, and args.bootstrapped decides whether
        the 'train' split is subsampled (without replacement) down to
        BOOTSTRAP_FRACTION of its original size
    :return: the dataset dict that was written out

    Side effects: temporarily clears args.bootstrapped while building the
    input file name (the suffix must not include "_bootstrapped"), and writes
    the result both locally and into the word-embeddings directory.
    '''
    # get the full name of the dataset!
    infile_names = dataset_name.split('.')
    old_bs = args.bootstrapped
    args.bootstrapped = False
    infile_names[0] += getNameSuffix(args)
    args.bootstrapped = old_bs
    infile_name = infile_names[0] + "." + infile_names[1]
    # read the data
    data = readFromJsonFile(infile_name)
    print('BOOSTRAPPED? {}'.format(args.bootstrapped))
    if args.bootstrapped:
        infile_names[0] += "_bootstrapped"
        #data = random.sample(data, bootstrap_percentage * len(data))
        #data['train'] = resample(data['train'], replace=True, n_samples=None)
        # Subsample the training split without replacement.
        data['train'] = random.sample(data['train'], int(BOOTSTRAP_FRACTION * len(data['train'])))
    # write the bootstrapped dataset to a file
    outfile_name = infile_names[0] + '.' + infile_names[1]
    print('creating {}'.format(outfile_name))
    writeToJsonFile(data, outfile_name)
    writeToJsonFile(data, os.path.join(WORD_EMBEDDING_DIRECTORY, outfile_name)) # also write it to the word embeddings directory
    return data
if __name__ == '__main__':
    os.chdir('./WikigenderJsonParsing/') #this is for running a script in the directory above this
    args = getCommandLineArgs()
    # Build the (optionally bootstrapped) copy of the main Wikigender dataset.
    createBootstrappedDataset('JsonData/Wikigender.json', args)
    os.chdir('../') # return to original directory
| [
"ajg@umail.ucsb.edu"
] | ajg@umail.ucsb.edu |
d3f6ac276eb291409be7ee6ff5b98b09efd7223f | ea14dde57798cbf21446e98cb7d5f33587566f2b | /blog/migrations/0003_comment_approved_comment.py | b784d30deeb681420a713f0783f1847bacec8acb | [] | no_license | marceljorde/SE2 | fd3d821e5aa9662d863df2f12e3f6f846c198137 | b1cb85b0be2490c1ed498195aa0e943cc18d1736 | refs/heads/master | 2020-04-11T04:54:48.166034 | 2018-12-31T13:37:24 | 2018-12-31T13:37:24 | 161,531,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 2.0.9 on 2018-12-20 13:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration (see file header): adds the
    `approved_comment` boolean flag, defaulting to False, to blog.Comment."""

    dependencies = [
        ('blog', '0002_comment'),
    ]
    operations = [
        migrations.AddField(
            model_name='comment',
            name='approved_comment',
            field=models.BooleanField(default=False),
        ),
    ]
| [
"marce@r119170.srs.swh.mhn.de"
] | marce@r119170.srs.swh.mhn.de |
26e2a1e09c016f6615d8caf36cb0155cbbab5dca | 262a761393d2f2de88a0ccaed96b2c4f06b56150 | /env/lib/python3.8/site-packages/isort/wrap.py | 872b096e7985b43f1ac53a7f482243b66c01bfdc | [
"MIT"
] | permissive | chaitraliv/mini-twitter | 58cac2bed3b645078422e069e7c305fd0d62e0cf | 99bbfca0b611744c829bca671300aa8dcc356ab1 | refs/heads/master | 2022-12-29T08:28:11.574173 | 2020-10-21T08:50:26 | 2020-10-21T08:50:26 | 292,632,756 | 0 | 0 | MIT | 2020-10-21T08:49:59 | 2020-09-03T17:12:40 | Python | UTF-8 | Python | false | false | 5,353 | py | import copy
import re
from typing import List, Optional, Sequence
from .settings import DEFAULT_CONFIG, Config
from .wrap_modes import WrapModes as Modes
from .wrap_modes import formatter_from_string
def import_statement(
    import_start: str,
    from_imports: List[str],
    comments: Sequence[str] = (),
    line_separator: str = "\n",
    config: Config = DEFAULT_CONFIG,
    multi_line_output: Optional[Modes] = None,
) -> str:
    """Returns a multi-line wrapped form of the provided from import statement.

    import_start -- the leading "from x import " text the imports hang off
    from_imports -- the imported names to lay out
    comments -- trailing comments to re-attach to the wrapped statement
    line_separator -- newline sequence used between wrapped lines
    config -- isort configuration (wrap mode, line length, indent, ...)
    multi_line_output -- optional override for config.multi_line_output
    """
    formatter = formatter_from_string((multi_line_output or config.multi_line_output).name)
    # Continuation lines are aligned one column past the opening statement.
    dynamic_indent = " " * (len(import_start) + 1)
    indent = config.indent
    line_length = config.wrap_length or config.line_length
    # First attempt: format with the configured mode at the full line length.
    statement = formatter(
        statement=import_start,
        imports=copy.copy(from_imports),
        white_space=dynamic_indent,
        indent=indent,
        line_length=line_length,
        comments=comments,
        line_separator=line_separator,
        comment_prefix=config.comment_prefix,
        include_trailing_comma=config.include_trailing_comma,
        remove_comments=config.ignore_comments,
    )
    if config.balanced_wrapping:
        lines = statement.split(line_separator)
        line_count = len(lines)
        if len(lines) > 1:
            minimum_length = min(len(line) for line in lines[:-1])
        else:
            minimum_length = 0
        new_import_statement = statement
        # Balanced wrapping: shrink the permitted line length one column at a
        # time until the last line is no shorter than the shortest earlier
        # line (or the statement would re-flow onto a different line count).
        while len(lines[-1]) < minimum_length and len(lines) == line_count and line_length > 10:
            statement = new_import_statement
            line_length -= 1
            new_import_statement = formatter(
                statement=import_start,
                imports=copy.copy(from_imports),
                white_space=dynamic_indent,
                indent=indent,
                line_length=line_length,
                comments=comments,
                line_separator=line_separator,
                comment_prefix=config.comment_prefix,
                include_trailing_comma=config.include_trailing_comma,
                remove_comments=config.ignore_comments,
            )
            lines = new_import_statement.split(line_separator)
    # A single-line result may still exceed the limit; fall back to generic
    # line wrapping in that case.
    if statement.count(line_separator) == 0:
        return _wrap_line(statement, line_separator, config)
    return statement
def line(content: str, line_separator: str, config: Config = DEFAULT_CONFIG) -> str:
    """Returns a line wrapped to the specified line-length, if possible.

    Splits on "import ", "." or "as " boundaries, moving trailing parts onto
    a continuation line which is itself recursively wrapped.  In NOQA mode
    the line is left intact and a "# NOQA" marker is appended instead.
    """
    wrap_mode = config.multi_line_output
    if len(content) > config.line_length and wrap_mode != Modes.NOQA: # type: ignore
        # Separate any trailing comment so it can be re-attached afterwards.
        line_without_comment = content
        comment = None
        if "#" in content:
            line_without_comment, comment = content.split("#", 1)
        for splitter in ("import ", ".", "as "):
            exp = r"\b" + re.escape(splitter) + r"\b"
            if re.search(exp, line_without_comment) and not line_without_comment.strip().startswith(
                splitter
            ):
                line_parts = re.split(exp, line_without_comment)
                if comment:
                    _comma_maybe = (
                        "," if (config.include_trailing_comma and config.use_parentheses) else ""
                    )
                    line_parts[-1] = f"{line_parts[-1].strip()}{_comma_maybe} #{comment}"
                # Pop trailing parts onto the continuation line until the
                # remaining content fits within the permitted length.
                next_line = []
                while (len(content) + 2) > (
                    config.wrap_length or config.line_length
                ) and line_parts:
                    next_line.append(line_parts.pop())
                    content = splitter.join(line_parts)
                if not content:
                    content = next_line.pop()
                # Recursively wrap the continuation line as well.
                cont_line = _wrap_line(
                    config.indent + splitter.join(next_line).lstrip(), line_separator, config
                )
                if config.use_parentheses:
                    if splitter == "as ":
                        output = f"{content}{splitter}{cont_line.lstrip()}"
                    else:
                        _comma = "," if config.include_trailing_comma and not comment else ""
                        if wrap_mode in (
                            Modes.VERTICAL_HANGING_INDENT, # type: ignore
                            Modes.VERTICAL_GRID_GROUPED, # type: ignore
                        ):
                            _separator = line_separator
                        else:
                            _separator = ""
                        output = (
                            f"{content}{splitter}({line_separator}{cont_line}{_comma}{_separator})"
                        )
                    lines = output.split(line_separator)
                    # Keep any trailing comment inside the parentheses.
                    if config.comment_prefix in lines[-1] and lines[-1].endswith(")"):
                        content, comment = lines[-1].split(config.comment_prefix, 1)
                        lines[-1] = content + ")" + config.comment_prefix + comment[:-1]
                    return line_separator.join(lines)
                # Without parentheses, use a backslash continuation instead.
                return f"{content}{splitter}\\{line_separator}{cont_line}"
    elif len(content) > config.line_length and wrap_mode == Modes.NOQA: # type: ignore
        if "# NOQA" not in content:
            return f"{content}{config.comment_prefix} NOQA"
    return content
# Alias used by import_statement (and by line itself, for recursion) so the
# local-variable name "line" inside callers does not shadow the function.
_wrap_line = line
| [
"chaitrali.vaidya@instazen.com"
] | chaitrali.vaidya@instazen.com |
a199a85117918b1c8fe6769bfdcbff3be408262e | 5186cc912502f9f32948c3810b5adc2cd0f015d8 | /soybean/reactor.py | b9e91523fe64d36b907749d9656b9625adbdbb63 | [
"Apache-2.0"
] | permissive | lcgong/soybean | c0ef4f1a88191a653bfd1f70881a2f1e470943fd | 43fd891113b05c79419d7c0850145c8284e51206 | refs/heads/main | 2023-02-27T08:47:47.198713 | 2021-02-03T04:00:52 | 2021-02-03T04:00:52 | 334,369,214 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | import inspect
import asyncio
import logging
from rocketmq.client import PushConsumer, ConsumeStatus
from .utils import make_group_id, json_loads
from .event import OccupiedEvent
from .typing import HandlerType
from .exceptions import UnkownArgumentError
logger = logging.getLogger("soybean.reactor")
class Reactor:
    """Bridges a RocketMQ push consumer to an asyncio handler coroutine.

    Messages matching `expression` on `topic` are delivered on the consumer's
    worker thread; each one is mapped to handler arguments and the handler
    coroutine is executed on the event loop that called start().
    """

    def __init__(self, channel, topic: str, expression: str,
                 handler: HandlerType, depth: int):
        self._channel = channel
        self._topic = topic
        self._expression = expression
        self._handler = handler
        # Stable consumer-group id derived from the channel name and handler.
        self._reactor_id = make_group_id(channel.name, handler, depth)
        self._consumer = None
        # Maps a raw message object to the argument values the handler expects.
        argvals_getter = build_argvals_getter(handler)
        self._handler_argvals_getter = argvals_getter
        self._busy_event = None

    @property
    def reactor_id(self):
        """Consumer-group id used for this reactor's subscription."""
        return self._reactor_id

    async def start(self):
        """Create, subscribe and start the push consumer.

        Must be called from a running event loop: handler coroutines are
        scheduled back onto that loop from the consumer's worker thread.
        (Leftover debug print/`import threading` removed.)
        """
        consumer = PushConsumer(group_id=self._reactor_id)
        consumer.set_thread_count(1)
        consumer.set_name_server_address(self._channel.namesrv_addr)

        self._busy_event = OccupiedEvent()

        loop = asyncio.get_running_loop()

        def run_coroutine(coroutine):
            # Run the coroutine on the event loop (thread-safe) and block this
            # worker thread until it finishes, propagating any exception.
            # Fixed: the original returned the bound method `future.result`
            # without calling it, so the callback neither waited for the
            # handler nor observed its failures (RECONSUME_LATER unreachable).
            future = asyncio.run_coroutine_threadsafe(coroutine, loop)
            return future.result()

        def _callback(msg):
            # Runs on the PushConsumer worker thread.
            run_coroutine(self._busy_event.acquire())
            try:
                arg_values = self._handler_argvals_getter(msg)
                run_coroutine(self._handler(*arg_values))
                return ConsumeStatus.CONSUME_SUCCESS
            except Exception as exc:
                logger.error((f"caught an error in reactor "
                              f"'{self._reactor_id}': {exc}"),
                             exc_info=exc)
                return ConsumeStatus.RECONSUME_LATER
            finally:
                run_coroutine(self._busy_event.release())

        consumer.subscribe(self._topic, _callback, expression=self._expression)
        consumer.start()
        self._consumer = consumer

    async def stop(self):
        """Wait for in-flight message handling to finish, then shut down."""
        await self._busy_event.wait_idle()
        # The current rocketmq-client-cpp implementation does not guarantee
        # that its worker thread has finished before shutdown(); shutting down
        # while the callback thread is still winding down can deadlock with
        # asyncio.  So after the callbacks go idle, wait a moment for the
        # client to retire the consumer worker thread before closing it.
        await asyncio.sleep(0.5)
        if self._consumer:
            self._consumer.shutdown()
            self._consumer = None
def build_argvals_getter(handler):
    """Build a function mapping a raw message object to the positional
    argument values of `handler`, resolved by parameter name via
    _getter_factories.  Raises UnkownArgumentError for unrecognised names."""
    params = inspect.signature(handler).parameters

    getters = []
    unknowns = []
    for name, spec in params.items():
        factory = _getter_factories.get(name)
        if factory is None:
            unknowns.append((name, spec))
        else:
            getters.append(factory(spec))

    if unknowns:
        mod = handler.__module__
        func = handler.__qualname__
        args = ", ".join([f"'{name}'" for name, spec in unknowns])
        errmsg = f"Unknown arguments: {args} of '{func}' in '{mod}'"
        raise UnkownArgumentError(errmsg)

    def _getter(msgobj):
        # Lazily evaluate each getter against the message.
        return (extract(msgobj) for extract in getters)

    return _getter
def getter_message(arg_spec):
    """Getter for the message body, decoded per the handler's annotation:
    str -> utf-8 text, bytes -> raw body, anything else -> JSON-decoded."""
    annotation = arg_spec.annotation
    if annotation == str:
        return lambda msgobj: msgobj.body.decode("utf-8")
    if annotation == bytes:
        return lambda msgobj: msgobj.body
    return lambda msgobj: json_loads(msgobj.body.decode("utf-8"))
def getter_msg_id(arg_spec):
    # The message id is passed through as-is (no decoding needed).
    return lambda msgobj: msgobj.id
def getter_msg_topic(arg_spec):
    # Fixed attribute name: was getattr(msgobj, "tpoic") — a typo that raised
    # AttributeError whenever a handler declared a `message_topic` or
    # `msg_topic` parameter.  The raw value is bytes; decode to text like the
    # sibling keys/tags getters.
    return lambda msgobj: getattr(msgobj, "topic").decode("utf-8")
def getter_msg_keys(arg_spec):
    # Keys arrive as raw bytes; hand them to the handler as text.
    return lambda msgobj: msgobj.keys.decode("utf-8")
def getter_msg_tags(arg_spec):
    # Tags arrive as raw bytes; hand them to the handler as text.
    return lambda msgobj: msgobj.tags.decode("utf-8")
# Maps handler parameter names to getter factories; both the long
# (`message_*`) and the short (`msg_*`) spellings are accepted.
_getter_factories = {
    "message": getter_message,
    "message_id": getter_msg_id,
    "message_topic": getter_msg_topic,
    "message_keys": getter_msg_keys,
    "message_tags": getter_msg_tags,
    "msg_id": getter_msg_id,
    "msg_topic": getter_msg_topic,
    "msg_keys": getter_msg_keys,
    "msg_tags": getter_msg_tags,
}
| [
"lcgong@gmail.com"
] | lcgong@gmail.com |
0072cc0115f29b67a47d46881396394aa26d284e | 2f1d04677be2bff8983e2521eb0beb94b694a7a5 | /setup.py | 418e061de985d028c2e3e9e462f2f8c90763342e | [] | no_license | adisuissa/rh_img_access_layer | d510c40537385eab4332aa7ef0cf17ea39afd902 | 42a48f8ed10ef7addd7b1ce5e47f8a0022f80642 | refs/heads/master | 2020-07-09T21:44:34.760714 | 2019-12-31T14:46:40 | 2019-12-31T14:46:40 | 204,090,213 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | import setuptools
# Minimal packaging configuration for the Rhoana image-access layer.
setuptools.setup(
    description="Image reading layer for the Rhoana pipeline",
    # install_requires=[
    #    "pyaml>=15.8.2"
    # ],
    name="rh_img_access_layer",
    packages=["rh_img_access_layer"],
    # NOTE(review): dependency_links is deprecated/removed in modern pip —
    # confirm whether the custom gcsfs fork is still required.
    dependency_links = ['http://github.com/adisuissa/gcsfs/tarball/master#egg=fs_gcsfs-0.4.1'],
    url="https://github.com/Rhoana/rh_img_access_layer",
    version="0.0.1"
)
| [
"adi.suissa@gmail.com"
] | adi.suissa@gmail.com |
403552e209068810af7e723ab196627c656e93e2 | 57d0789235d8ab014b584a285697b8db2763f1df | /day42.py | 50040391605109c6447c00500b14c0e0accf6259 | [] | no_license | Werefriend/CS112-Spring2012 | 8856ccde68c594f87932b96cc8bc41288095bfb5 | c79f1894876f97669f7628b446c6068b4bb5f4d0 | refs/heads/master | 2020-12-25T17:13:20.016669 | 2012-04-02T14:17:35 | 2012-04-02T14:17:35 | 3,266,367 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | #!/usr/bin/env python
# NOTE: this is a Python 2 lecture script (print statements); it will not run
# unmodified under Python 3.
color = (255, 10, 30)
people = {'Jonah' : "stupid", 'Alec' : "smelly",'Jack' : "ugly", 'Paul' : "awesome"}
matrix = ["hello", 2.0, 5, [10, 20]]
eng2sp = {}
eng2sp['one'] = 'uno'
eng2sp['two'] = 'dos'
# items() yields the dictionary's key/value pairs.
for k,v in eng2sp.items():
    print k,v
print eng2sp['one']
#print matrix
#print matrix[0]
#print matrix[1]
#print matrix[2]
#print matrix[3][0]
#print matrix[3][1]
#where would I use a multidimensional array??
#imagine you have an image of rows and columns...
#for each tuple pixel, there are a red, green, and blue value
#TUPLES
#a tuple is any two or more things grouped together
#unlike a list, these are immutable, meaning not changeable
#DICTIONARIES
#like a list, but the list defines the dictionary by a key, not an order
print len(people)
print people.keys()
print people.values()
s = "Monty Python"
# Slicing characters 6..11 prints "Python".
print s[6:12]
| [
"reeves.sam@gmail.com"
] | reeves.sam@gmail.com |
067a7abea5aa8ea89d7339cdb1ac2cad200418bb | 5fbf2adec8d7647b9aeefa51695aa3f13ee57810 | /server/load_backup_locally.py | 076c18cbae05647fcf9c789b079ff13e403dc7b7 | [] | no_license | angelacantfly/dancedeets-monorepo | 8bb6579f6f5d30e88c8d4c0e239c6c8fed678094 | 6b7a48d91d0737010acd9e08a89d99c2c982205a | refs/heads/master | 2021-01-20T09:14:22.613044 | 2017-08-26T21:48:14 | 2017-08-26T21:48:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,381 | py | #!/usr/bin/python
"""
# App Engine import data from Datastore Backup to localhost
You can use this script to import large(ish) App Engine Datastore backups to your localohst dev server.
## Getting backup files
Follow instructions from Greg Bayer's awesome article to fetch the App Engine backups:
http://gbayer.com/big-data/app-engine-datastore-how-to-efficiently-export-your-data/
Basically, download and configure gsutil and run:
```
gsutil -m cp -R gs://your_bucket_name/your_path /local_target
```
## Reading data to your local (dev_appserver) application
Copy-paste this gist to your Interactive Console, set correct paths and press `Execute`.
(default: http://localhost:8000/console)
"""
import sys
sys.path.insert(0, '/usr/local/google_appengine')
print sys.path
from google.appengine.api.files import records
from google.appengine.datastore import entity_pb
from google.net.proto.ProtocolBuffer import ProtocolBufferDecodeError
from google.appengine.ext import ndb
from os.path import isfile
from os.path import join
from os import listdir
from events.eventdata import DBEvent
def run():
# Set your downloaded folder's path here (must be readable by dev_appserver)
mypath = '/Users/lambert/Dropbox/dancedeets/data/datastore_backup_datastore_backup_2016_11_19_DBEvent/15700286559371541387849311E815D'
# Se the class of the objects here
cls = DBEvent
# Set your app's name here
appname = "dev~None"
# Do the harlem shake
onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
for file in onlyfiles:
i = 0
try:
raw = open(mypath + "/" + file, 'r')
reader = records.RecordsReader(raw)
to_put = list()
for record in reader:
entity_proto = entity_pb.EntityProto(contents=record)
entity_proto.key_.app_ = appname
obj = cls._from_pb(entity_proto)
to_put.append(obj)
i += 1
if i % 100 == 0:
print "Saved %d %ss" % (i, '') #entity.kind())
ndb.put_multi(to_put) # use_memcache=False)
to_put = list()
ndb.put_multi(to_put) # use_memcache=False)
to_put = list()
print "Saved %d" % i
except ProtocolBufferDecodeError:
""" All good """
run()
| [
"mlambert@gmail.com"
] | mlambert@gmail.com |
c904e572df97233d9e65ac3224ef24e0694134a6 | 24faec36e3196fdc77837c45e5934a3f71426ff8 | /college system.py | 83c3c5843f1f04fb7ed197af94ae6ee69f3de32c | [] | no_license | MuhammadRasiMS/college-management | b1a147f2121c5d0733718171078b259e132f8400 | fc2187e567832c4af1d6a4ec4d83a23af8793b9d | refs/heads/master | 2023-08-08T04:49:07.517448 | 2021-09-11T04:39:26 | 2021-09-11T04:39:26 | 405,284,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,248 | py | import mysql.connector as mysql
db = mysql.connect(host="localhost", user="root", password="", database="college")
command_handler = db.cursor(buffered=True)
def teacher_session():
while 1:
print("")
print("Teacher's Menu")
print("1. Mark student register")
print("2. view register")
print("3. Logout")
user_option = input(str("Option : "))
if user_option == "1":
print("")
print("Mark student register")
command_handler.execute("SELECT username FROM users WHERE privilege = 'student'")
records = command_handler.fetchall()
date = input(str("Date : DD/MM/YYYY : "))
for record in records:
record = str(record).replace("'", "")
record = str(record).replace(",", "")
record = str(record).replace("(", "")
record = str(record).replace(")", "")
# Present | #Absent | #Late
status = input(str("Status for " + str(record) + "P/A/L : "))
query_vals = (str(record), date, status)
command_handler.execute("INSERT INTO attendance (username, date, status) VALUES(%s,%s,%s)", query_vals)
db.commit()
print(record + " Marked as " + status)
elif user_option == "2":
print("")
print("Viewing all student registers")
command_handler.execute("SELECT username, date, status FROM attendance")
records = command_handler.fetchall()
print("Displaying all registers")
for record in records:
print(record)
elif user_option == "3":
break
else:
print("No valid option was selected")
def student_session(username):
while 1:
print("")
print("Student's Menu")
print("")
print("1. View Register")
print("2. Download Register")
print("3. Logout")
user_option = input(str("Option : "))
if user_option == "1":
print("Displaying Register")
username = (str(username),)
command_handler.execute("SELECT date, username, status FROM attendance WHERE username = %s", username)
records = command_handler.fetchall()
for record in records:
print(record)
elif user_option == "2":
print("Downloading Register")
username = (str(username),)
command_handler.execute("SELECT date, username, status FROM attendance WHERE username = %s", username)
records = command_handler.fetchall()
for record in records:
with open("register.txt", "w") as f:
f.write(str(records)+"\n")
f.close()
print("All records saved")
elif user_option == "3":
break
else:
print("No valid option was selected")
def admin_session():
while 1:
print("")
print("Admin Menu")
print("1. Register new Student")
print("2. Register new Teacher")
print("3. Register Existing Student")
print("4. Register Existing Student")
print("5. Logout")
user_option = input(str("option : "))
if user_option == "1":
print("")
print("Register New Student")
username = input(str("Student username : "))
password = input(str("Student password : "))
query_vals = (username, password)
command_handler.execute("INSERT INTO users (username,password,privilege) VALUES (%s,%s,'student')",
query_vals)
db.commit()
print(username + " has been registered as a student")
elif user_option == "2":
print("")
print("Register New Teacher")
username = input(str("Teacher username : "))
password = input(str("Teacher password : "))
query_vals = (username, password)
command_handler.execute("INSERT INTO users (username,password,privilege) VALUES (%s,%s,'teacher')",
query_vals)
db.commit()
print(username + " has been registered as a teacher")
elif user_option == "3":
print("")
print("Delete Existing Student Account")
username = input(str("Student username : "))
query_vals = (username, "student")
command_handler.execute("DELETE FROM users WHERE username = %s AND privilege = %s ", query_vals)
db.commit()
if command_handler.rowcount < 1:
print("User not found")
else:
print(username + " has been deleted")
elif user_option == "4":
print("")
print("Delete Existing Teacher Account")
username = input(str("Teacher username : "))
query_vals = (username, "teacher")
command_handler.execute("DELETE FROM users WHERE username = %s AND privilege = %s ", query_vals)
db.commit()
if command_handler.rowcount < 1:
print("User not found")
else:
print(username + " has been deleted")
elif user_option == "5":
break
else:
print("No valid option selected")
def auth_student():
print("")
print("Student's Login")
print("")
username = input(str("Username : "))
password = input(str("Password : "))
query_vals = (username, password, "student")
command_handler.execute("SELECT username FROM users WHERE username = %s AND password = %s AND privilege = %s",
query_vals)
if command_handler.rowcount <= 0:
print("Invalid login details")
else:
student_session(username)
def auth_teacher():
print("")
print("Teacher's Login")
print("")
username = input(str("Username : "))
password = input(str("Password : "))
query_vals = (username, password)
command_handler.execute("SELECT * FROM users WHERE username = %s AND password = %s AND privilege = 'teacher'",
query_vals)
if command_handler.rowcount <= 0:
print("Login not recognised")
else:
teacher_session()
def auth_admin():
print("")
print("Admin Login")
print("")
username = input(str("Username : "))
password = input(str("Password : "))
if username == "admin":
if password == "password":
admin_session()
else:
print("Incorrect password !")
else:
print("Login details not recognised")
def main():
while 1:
print("Welcome to the college system")
print("")
print("1. Login as student")
print("2. Login as teacher")
print("3. Login as admin")
user_option = input(str("Option : "))
if user_option == "1":
auth_student()
elif user_option == "2":
auth_teacher()
elif user_option == "3":
auth_admin()
else:
print("No valid option was selected")
main()
| [
"muhammadrasi0@gmail.com"
] | muhammadrasi0@gmail.com |
6f916b447bc8946eb14222b33526f345a1cc0c4f | 21324be3146af56c524a332b7633d4bb20dfa594 | /rest/taskrouter/reservations/instance/get/example-1/example-1.py | 7912a4008771c3b02fa7515a95a8623362ce22c9 | [
"MIT"
] | permissive | mrphishxxx/api-snippets | c0a7967c6fced7413a1c4f695041cff2d85bcf6c | 34faf794971fadfab1d2666647d0322522f4a179 | refs/heads/master | 2021-01-22T15:00:40.502532 | 2016-05-13T23:11:33 | 2016-05-13T23:11:33 | 58,898,143 | 1 | 0 | null | 2016-05-16T02:37:58 | 2016-05-16T02:37:58 | null | UTF-8 | Python | false | false | 522 | py | # Download the Python helper library from twilio.com/docs/python/install
from twilio.rest import TwilioTaskRouterClient
# Your Account Sid and Auth Token from twilio.com/user/account
account_sid = "{{ account_sid }}"
auth_token = "{{ auth_token }}"
workspace_sid = "{{ workspace_sid }}"
task_sid = "{{ task_sid }}"
client = TwilioTaskRouterClient(account_sid, auth_token)
reservation = client.reservations(workspace_sid, task_sid).get(reservation_sid)
print reservation.reservation_status
print reservation.worker_name | [
"eliecerhdz@gmail.com"
] | eliecerhdz@gmail.com |
d396b8340a6bf61e29cf5d053679b246a4c33040 | 689fe220a0f5b3adc40b19f7b63b571a6bf412bb | /present_absent_loci.py | 7f98cf35431124e36b99276326a6a2bb170683a2 | [] | no_license | NinhVu/filteringStacks | b97bb05fbf04f1490a5a6277e063063c2451732f | 19fb7f45ea78993e04afb1a55de743b5faa203cb | refs/heads/master | 2016-08-12T16:00:31.896619 | 2016-03-13T04:22:36 | 2016-03-13T04:22:36 | 53,767,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,788 | py | #!/usr/bin/python3.4
# present_absent.py 3/12/16 by Ninh Vu
# This program will filter loci/stacks found in only individuals ask by user
import glob, sys, os
os.getcwd()
input_list = input("\nEnter individuals in catalog (uStacks id) you want to filter e.g. 106,121,112,120 : ")
# convert input list into list of integers and sort
user_list = input_list.split(",")
user_list = list(map(int, user_list))
user_list.sort()
print("\nOnly stacks with these individuals will be filtered:",user_list,"\n")
print("Takes a few seconds or minutes to filter depending on number of stacks/loci in catalog...")
# filter tags.tsv__________________________________________________________________________________________________________________________________________________
for file in glob.glob("*.tags.tsv"): # open ***.catalog.tags.tsv file in current directory
tags = open(file, 'r')
header = tags.readline()
data = tags.readline()
tags_tsv_loci=[]
while data:
# split row into list and define variables for loop below
catCount = 0
rowItems = data.split("\t")
# v2: split into oneList then create two lists: sampleID and catalogID. Convert both lists into integers, remove duplicate items and finally sort sampleID
for y in rowItems: # loop takes strings and convert into list of sample_catalogs
if catCount == 8:
samples_catalog = rowItems[8]
oneList = samples_catalog.split(",") # e.g. ['27_22319', '28_874']
catCount +=1
sampleID = [i.split('_')[0] for i in oneList] # split oneList and make sample list. [0] represents the first item of split item.
catalogID = [i.split('_')[1] for i in oneList] # split oneList and make catalog list. [1] represents the second item of the split item. Not necessary here.
sampleID, catalogID = list(map(int, sampleID)), list(map(int, catalogID))
sampleID = list(set(sampleID)) # REMOVE DUPLICATE B/C YOU WANT ALL STACKS EVEN ONES WITH MULTITPLE COPIES
sampleID.sort() # sort sampleID
if sampleID == user_list:
tags_tsv_loci.append(rowItems[2])
# read next line
data = tags.readline()
tags_tsv_loci = list(map(int, tags_tsv_loci)) # convert string list to int list
tags_tsv_loci = list(set(tags_tsv_loci)) # remove duplicate items
tags_tsv_loci.sort() # sort loci
tags.close()
# create whitelist.txt_____________________________________________________________________________________________________________________________________________
whitelist = open('present_absent_whitelist.txt', 'w')
whitelist.write('\n'.join('%s' % x for x in tags_tsv_loci)) # write whitelist with only locus
whitelist.write('\n')
print("\n\nYour present/absent stacks of whitelist file present_absent_whitelist.txt is ready.\n\n\n")
whitelist.close()
| [
"ninh.vu@idfg.idaho.gov"
] | ninh.vu@idfg.idaho.gov |
5a0dfba91d758caa2da4d972f8b603773eb86654 | 22fcb33a8d110630a4e090a9a3202618f52376d6 | /videos/migrations/0001_initial.py | 2f19862eba1dfa585ace0055705938d6b52090dd | [] | no_license | karandeepSJ/CVIT-UserStudyPortal | b5f08ef2833b23d26da5ab1ecfe2494ab26e4021 | a7ff3b81fea4a8333d83c1c89ebc56747ca541c8 | refs/heads/master | 2020-05-19T22:10:36.058553 | 2019-08-30T11:57:08 | 2019-08-30T11:57:08 | 185,241,744 | 0 | 1 | null | 2019-08-30T11:57:09 | 2019-05-06T17:28:15 | JavaScript | UTF-8 | Python | false | false | 545 | py | # Generated by Django 2.1.4 on 2019-04-26 18:29
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='BVH',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('action', models.CharField(max_length=100)),
('path', models.CharField(max_length=200)),
],
),
]
| [
"karan.jkps@gmail.com"
] | karan.jkps@gmail.com |
34e0d339fa61eb2fba8a107ea109b6b0c56efc1e | 743d4545702532c967efee2c12015d91853b6b80 | /orders/migrations/0001_initial.py | 50adf5b21efe66d7cf544e46d52e15ce62c1faa2 | [] | no_license | SOAD-Group-36/server | 81a7ced2149174fe4d9c1644ee2afd78054d7d29 | 5a5a1e2cd4a361cff8fff008600d65d6dc8edaab | refs/heads/main | 2023-02-03T06:44:36.041311 | 2020-12-12T10:45:21 | 2020-12-12T10:45:21 | 305,055,627 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # Generated by Django 3.1.2 on 2020-11-11 15:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('products', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.IntegerField(default=1)),
('price', models.DecimalField(decimal_places=2, max_digits=7)),
('placed_on', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(choices=[('Pl', 'Placed'), ('Pr', 'Processed'), ('Pk', 'Packed'), ('Sh', 'Shipped'), ('Dl', 'Delivered'), ('Rj', 'Rejected'), ('Rt', 'Returned'), ('Rc', 'Received')], default='Pl', max_length=2)),
('product', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='orders', to='products.product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"masterashu@live.in"
] | masterashu@live.in |
723900ac72e65ec5aa7c94b94924dc0e69cf8764 | c5effe7f4efe739df5f4567f64cfa7b76f843aee | /OCR++/myproject/myapp/urls.py | 38947087c61fc1a9fd0a2f57125ca42ef8a884f5 | [] | no_license | Kabongosalomon/ocrplusplus | 16180f8239fb2113dff4568c0c3b98930e050071 | 7dc3f225306a545b3768311eafea2fa56959d950 | refs/heads/master | 2021-09-07T15:34:33.738598 | 2018-02-25T04:47:37 | 2018-02-25T04:47:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # -*- coding: utf-8 -*-
from django.conf.urls import patterns, url
from . import views
urlpatterns = patterns('myproject.myapp.views',
url(r'^$', 'home', name = 'home')
url(r'^list/$', 'list', name='list'),
url(r'^list/runScript/$', 'runScript', name='runScript'),
# url(r'^list/vote/$', 'vote', name='vote'),
# url(r'^list/upload/$', 'upload', name='upload'),
)
| [
"ocrplusplus123@gmail.com"
] | ocrplusplus123@gmail.com |
a3fecf2b2639a499281789ccf1c9a980633503b5 | 8d91f8867fb5b72ca257d9e7152188914154ccd1 | /pune/service/deploy.py | 34f54c7777056940aca2674eb70c75a4be27b75b | [] | no_license | liwushuo/pune | c6420e9a3f65711cc7a6c578720122e5b7f53eb9 | 23eae59fc3d3515903700740fade1bce8b8d6e12 | refs/heads/master | 2021-01-10T08:10:41.056344 | 2016-04-18T08:45:01 | 2016-04-18T08:45:01 | 53,919,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,488 | py | # -*- coding: utf-8 -*-
from datetime import datetime
from flask import current_app
from pune.core import celery
from pune.core import db
from pune.models import Deploy
class DeployService(object):
@staticmethod
def get(deploy_id):
deploy = Deploy.query.get(deploy_id)
return deploy and deploy.to_dict()
@staticmethod
def add(name, project_id, environment_id, release_id, operator_id, task_id):
deploy = Deploy(name=name, project_id=project_id, environment_id=environment_id,
release_id=release_id, operator_id=operator_id, task_id=task_id)
db.session.add(deploy)
db.session.commit()
return deploy.to_dict()
@staticmethod
def list_by_environment(environment_id, offset, limit):
deploys = (Deploy.query.filter_by(environment_id=environment_id)
.order_by(Deploy.created_at.desc())
.offset(offset)
.limit(limit)
.all())
return [deploy.to_dict() for deploy in deploys]
# TODO: not safe at all...
@staticmethod
def count_running_by_environment(environment_id):
count = Deploy.query.filter_by(environment_id=environment_id, status=Deploy.Status.RUNNING).count()
return count
@staticmethod
def count_by_environment(environment_id):
count = Deploy.query.filter_by(environment_id=environment_id).count()
return count
@staticmethod
def mark_succeeded(deploy_id):
Deploy.query.filter_by(id=deploy_id, status=Deploy.Status.RUNNING).update({'status':Deploy.Status.SUCCEEDED, 'finished_at': datetime.utcnow()})
db.session.commit()
@staticmethod
def mark_failed(deploy_id):
Deploy.query.filter_by(id=deploy_id, status=Deploy.Status.RUNNING).update({'status':Deploy.Status.FAILED, 'finished_at': datetime.utcnow()})
db.session.commit()
@staticmethod
def mark_cancelled(deploy_id):
Deploy.query.filter_by(id=deploy_id, status=Deploy.Status.RUNNING).update({'status':Deploy.Status.CANCELLED, 'finished_at': datetime.utcnow()})
db.session.commit()
@staticmethod
def cancel_task(deploy_id):
deploy = Deploy.query.get(deploy_id)
print deploy.task_id
celery.control.revoke(deploy.task_id, terminate=False)
DeployService.mark_cancelled(deploy_id)
@staticmethod
def update():
pass
| [
"maplevalley8@gmail.com"
] | maplevalley8@gmail.com |
c5ebdf4e4a222fa96d4d8a27ede2f428ab34f5f6 | 59f0ae12b81de3c9d5a29ce82425b9498fee2c1b | /tests/test_application.py | a89c2dcd0619356ab4b4fe39088ff5eea083d3e6 | [] | no_license | Cheongmin/VoiceReader-Rest | 9f99f14a60b97ccd8d97b74c6196a644a983684c | 599ffb8a552bab9433389eec671ea97cf4be67d1 | refs/heads/master | 2022-12-11T10:38:31.843698 | 2019-02-08T14:04:12 | 2019-02-08T14:04:12 | 155,198,951 | 0 | 0 | null | 2022-12-08T02:13:26 | 2018-10-29T11:16:16 | Python | UTF-8 | Python | false | false | 390 | py | from voicereader import application
def test_create(monkeypatch):
monkeypatch.setattr('voicereader.api_v1.middlewares.init_app', lambda app: None)
monkeypatch.setattr('voicereader.api_v1.middlewares.jwt.init_api', lambda api: None)
app = application.create()
res = app.test_client().get('api/ping')
assert res.status_code == 200
assert res.get_data() == b'pong'
| [
"gyuhwan.a.kim@gmail.com"
] | gyuhwan.a.kim@gmail.com |
ebc0f24740813770b38a7fd3c48bc48a8611dd75 | 55b132bd206ddd4e84fa9de2f6c06ccf50385d2d | /flearn/models/Fmnist/mclr.py | 15d83ee2608d9d7fa88edd105ea44aad625afe53 | [] | no_license | XinJiang1994/HFmaml | 9b58fab7a1a1f3d153103ceb0cd964d5d49a1ed4 | 15e70293c896b78d054dd20901a1941d1a91d40d | refs/heads/master | 2023-02-01T15:37:38.882104 | 2020-12-17T06:51:42 | 2020-12-17T06:51:42 | 288,163,757 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,125 | py | import numpy as np
import tensorflow as tf
from flearn.models.FedmamlBaseModel import BaseModel
from flearn.utils.model_utils import active_func
### This is an implenmentation of Hessian Free maml meta learning algirithm propoesed by Sheng Yue####
class Model(BaseModel):
def __init__(self,params):
self.num_classes=params['num_classes']
super().__init__(params)
def get_input(self):
'''
:return:the placeholders of input: features_train,labels_train,features_test,labels_test
'''
features_train = tf.placeholder(tf.float32, shape=[None, 784], name='features_train')
labels_train = tf.placeholder(tf.float32, shape=[None, 10], name='labels_train')
features_test = tf.placeholder(tf.float32, shape=[None, 784], name='features_test')
labels_test = tf.placeholder(tf.float32, shape=[None, 10], name='labels_test')
return features_train,labels_train,features_test,labels_test
def forward_func(self,inp, weights, w_names , reuse = False):
'''
:param inp: input
:param weights: theta
:param reuse:
:return: model y
when overload this function you should make w=dict(zip(w_names,weights))
'''
weights = dict(zip(w_names, weights))
hidden = tf.matmul(inp, weights['w1']) + weights['b1']
hidden = active_func(hidden)
hidden = tf.matmul(hidden, weights['w2']) + weights['b2']
hidden = active_func(hidden)
hidden = tf.matmul(hidden, weights['w3']) + weights['b3']
return hidden
def construct_weights(self):
'''
:return:weights
'''
w1 = tf.Variable(tf.truncated_normal([784, 32], stddev=0.01), name='w1')
b1 = tf.Variable(tf.zeros([32]), name='b1')
w2 = tf.Variable(tf.truncated_normal([32, 64], stddev=0.01), name='w2')
b2 = tf.Variable(tf.zeros([64]), name='b2')
w3 = tf.Variable(tf.truncated_normal([64, self.num_classes], stddev=0.01), name='w3')
b3 = tf.Variable(tf.zeros([self.num_classes]), name='b3')
return [w1, b1, w2, b2, w3, b3]
| [
"xinjiang@csu.edu.cn"
] | xinjiang@csu.edu.cn |
3a6f927241b180e157f7756d4833dee91440dfa9 | 7c8bd2e26fdabf1555e0150272ecf035f6c21bbd | /삼성기출/새로운 게임2.py | 3f7cacad987e8780f64a22bcecc01d30ec281fc1 | [] | no_license | hyeokjinson/algorithm | 44090c2895763a0c53d48ff4084a96bdfc77f953 | 46c04e0f583d4c6ec4f51a24f19a373b173b3d5c | refs/heads/master | 2021-07-21T10:18:43.918149 | 2021-03-27T12:27:56 | 2021-03-27T12:27:56 | 245,392,582 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,461 | py | from _collections import deque
#체스판 말 갯수:k(1번~k번)
#이동방향:위,아래,왼쪽,오른쪽
#흰색인 경우 그 칸으로 이동,이동하는 칸에 말이 있으면 그곳에 스택 쌓기
#빨간색인 경우 이동하고 순서 reverse
#파란색인 경우 말의 이동방향을 역방향 한칸 이동 ,이동칸이 파란색인 경우 이동x
dx=[0,0,-1,1]
dy=[1,-1,0,0]
rev_direction={0:1,1:0,2:3,3:2}
def check():
for i in range(n):
for j in range(n):
if len(start[i][j])>=4:
return True
return False
def solve():
turn=0
p=0
while True:
turn+=1
if turn>1000:
return -1
for number in range(1,k+1):
x,y,d=horse[number]
nx,ny=x+dx[d],y+dy[d]
if nx<0 or nx>=n or ny<0 or ny>=n or arr[nx][ny]==2:
nd=rev_direction[d]
nx,ny=x+dx[nd],y+dy[nd]
if nx<0 or nx>=n or ny<0 or ny>=n or arr[nx][ny]==2:
horse[number][2]=nd
continue
p=1
if arr[nx][ny]==0:
left=start[x][y][:start[x][y].index(number)]
right=start[x][y][start[x][y].index(number):]
start[x][y]=left
start[nx][ny].extend(right)
if len(start[nx][ny])>=4:
return turn
for i in right:
horse[i][0],horse[i][1]=nx,ny
if p==1:
horse[number][2]=nd
p=0
elif arr[nx][ny]==1:
left = start[x][y][:start[x][y].index(number)]
right = start[x][y][start[x][y].index(number):]
start[x][y] = left
right.reverse()
start[nx][ny].extend(right)
if len(start[nx][ny]) >= 4:
return turn
for i in right:
horse[i][0], horse[i][1] = nx, ny
if p == 1:
horse[number][2] = nd
p = 0
if __name__ == '__main__':
n,k=map(int,input().split())
#0:흰색,1:빨간색,2:파란색
arr=[list(map(int,input().split()))for _ in range(n)]
start=[[[]*n for _ in range(n)] for _ in range(n)]
horse=dict()
for i in range(1,k+1):
x,y,v=map(int,input().split())
start[x-1][y-1].append(i)
horse[i]=[x-1,y-1,v-1]
print(solve())
| [
"hjson817@gmail.com"
] | hjson817@gmail.com |
8b57c9efa4983527dbd55908cbb5b5acbd4edbeb | 20e3ee6642d20578e48756963798acfe307ac6b5 | /Miscellaneous/Python XML Parser/Example.py | ef7e6dc6952d02a5cb41a0c433b4bb1594c14bce | [] | no_license | sirinenisaikiran/Python | 538f64276767435de3233b720f547aac0bf4d511 | bdfef0d1c04c7f3b9fc91a164b5fd1789828176c | refs/heads/master | 2023-01-31T00:53:01.650916 | 2021-06-06T10:39:20 | 2021-06-06T10:39:20 | 237,744,104 | 0 | 0 | null | 2023-01-26T03:38:47 | 2020-02-02T08:58:49 | Python | UTF-8 | Python | false | false | 455 | py | import xml.etree.ElementTree as ET
mytree = ET.parse('Sample.xml')
myroot = mytree.getroot()
# print(myroot)
# print(myroot.tag)
# print(myroot[0].tag)
# print(myroot[0].attrib)
#
# for x in myroot[0]:
# print(x.tag, x.attrib)
# for x in myroot[0]:
# print(x.text)
# for x in myroot[0]:
# print(x.tag, x.attrib, x.text)
for x in myroot.findall('food'):
item = x.find('item').text
price = x.find('price').text
print(item,price) | [
"saikiran.sirneni@gmail.com"
] | saikiran.sirneni@gmail.com |
00f52c6cf6c7645f0524b3ed9f86a1bf017a892b | c74db84433f8a5f9199678b52bc9770083c30f53 | /programing/dataStructure/heap/heap.py | ed6efe9f2567dc4135bc8f746c4d311c9f074718 | [] | no_license | wiseun/TIL | fd4708a4ec064d0d1b2f681caafdcea98e7fbf34 | f337a185a6911526263e2446519fa5d78de79dd3 | refs/heads/master | 2021-06-08T09:13:05.868110 | 2021-05-07T06:03:18 | 2021-05-07T06:03:18 | 94,679,460 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,999 | py | #!/usr/bin/python3
import random
import sys
class MinHeap:
def __init__(self):
self.data = []
def getSize(self):
return len(self.data)
def reBuildingWhenPush(self):
idx = self.getSize() - 1
while idx > 0:
parent = int((idx - 1) / 2)
left = parent * 2 + 1
right = parent * 2 + 2
# Have only left child
if self.getSize() <= right:
if self.data[parent] > self.data[left]:
self.data[parent], self.data[left] = self.data[left], self.data[parent]
idx = parent
continue
if self.data[left] < self.data[parent] and self.data[left] < self.data[right]:
self.data[parent], self.data[left] = self.data[left], self.data[parent]
elif self.data[right] < self.data[parent] and self.data[right] < self.data[left]:
self.data[parent], self.data[right] = self.data[right], self.data[parent]
idx = parent
def reBuildingWhenPop(self):
idx = 0
while idx < self.getSize() - 1:
left = idx * 2 + 1
right = idx * 2 + 2
if self.getSize() <= left:
break
# Have only left child
if self.getSize() <= right:
if self.data[idx] > self.data[left]:
self.data[idx], self.data[left] = self.data[left], self.data[idx]
idx = left
continue
if self.data[left] < self.data[idx] and self.data[left] < self.data[right]:
self.data[idx], self.data[left] = self.data[left], self.data[idx]
idx = left
elif self.data[right] < self.data[idx] and self.data[right] < self.data[left]:
self.data[idx], self.data[right] = self.data[right], self.data[idx]
idx = right
else:
break
def push(self, value):
self.data.append(value)
self.reBuildingWhenPush()
def pop(self):
if self.getSize() == 0:
return 0
if self.getSize() != 1:
self.data[0], self.data[-1] = self.data[-1], self.data[0]
value = self.data.pop()
self.reBuildingWhenPop()
return value
if __name__ == "__main__":
minHeap = MinHeap()
for j in range(1, 1000):
# make test data
testSize = j
testSet = [i for i in range(1, 1 + testSize)]
random.shuffle(testSet)
for i in testSet:
minHeap.push(i)
if minHeap.getSize() != testSize:
print(str(j) + ": Test is fail: MinHeap.getSize()")
sys.exit(-1)
for i in range(1, 1 + testSize):
value = minHeap.pop()
#print(str(i) + ", " + str(value))
if i != value:
print(str(j) + ": Test is fail: MinHeap")
sys.exit(-1)
print(str(j) + ": Test is pass")
| [
"dongheon.kim@lge.com"
] | dongheon.kim@lge.com |
6668ad6d2a23a2b39e19b176c96af3cd8ff06f5b | 703926c99852ac67a4d4fa9009364ad26fe254d5 | /dices.py | a4c959a24f55476956d7d9000d6c3ea81927617c | [
"MIT"
] | permissive | mariamingallonMM/AI-ML-W4-normal-probability-distribution | e6196b3e6b752d8cb850a9b2d31d7ebf69c84752 | 95569929078b22555f870675f27aeca29f8ce487 | refs/heads/main | 2023-05-12T02:09:14.167027 | 2021-06-04T02:03:25 | 2021-06-04T02:03:25 | 336,903,772 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 917 | py | import numpy as np
def probability_of_sum(total:int, dice1, dice2):
"""
Brief:
Basic probability - Dice cast
Suppose a pair of fair 6-sided dice are thrown.
What is the probability that the sum of the rolls is 6? (Answer as a simple fraction of integers)
reference: https://statweb.stanford.edu/~susan/courses/s60/split/node65.html
"""
n = dice1.shape[0]
m = dice2.shape[0]
comb = n * m
count = 0
for i in dice1:
for j in dice2:
sum = int(i + j)
if sum == total:
count += 1
prob = count / comb
return print("{:.2%}".format(prob))
# define the dice as a linear array of 1 to 6, all integers
dice1 = np.linspace(1,6,6,dtype=int)
# call the function above with the total for which we would like to calculate the probability with 2 dices
prob = probability_of_sum(6, dice1, dice1)
| [
"maria.mingallon@mottmac.com"
] | maria.mingallon@mottmac.com |
845db2f47f763ae4e09097e253320bf541736141 | 53eee7eb899cb518983008532257037fb89def13 | /343.integer-break.py | e226facec72a5754c30be689c04e5eec6a509a9c | [] | no_license | chenxu0602/LeetCode | 0deb3041a66cb15e12ed4585bbe0fefce5dc6b26 | 3dc5af2bc870fcc8f2142130fcd2b7cab8733151 | refs/heads/master | 2023-07-05T19:26:21.608123 | 2023-07-02T08:35:35 | 2023-07-02T08:35:35 | 233,351,978 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,425 | py | #
# @lc app=leetcode id=343 lang=python3
#
# [343] Integer Break
#
# https://leetcode.com/problems/integer-break/description/
#
# algorithms
# Medium (50.19%)
# Likes: 1086
# Dislikes: 227
# Total Accepted: 110.4K
# Total Submissions: 219.2K
# Testcase Example: '2'
#
# Given a positive integer n, break it into the sum of at least two positive
# integers and maximize the product of those integers. Return the maximum
# product you can get.
#
# Example 1:
#
#
#
# Input: 2
# Output: 1
# Explanation: 2 = 1 + 1, 1 × 1 = 1.
#
#
# Example 2:
#
#
# Input: 10
# Output: 36
# Explanation: 10 = 3 + 3 + 4, 3 × 3 × 4 = 36.
#
# Note: You may assume that n is not less than 2 and not larger than 58.
#
#
#
# @lc code=start
import math
class Solution:
def integerBreak(self, n: int) -> int:
# if n == 2:
# return 1
# if n == 3:
# return 2
# dp = [0] * (n + 1)
# dp[2] = 2
# dp[3] = 3
# for i in range(4, n + 1):
# dp[i] = max(dp[i-2] * 2, dp[i-3] * 3)
# return dp[n]
# O(logN)
if n == 2:
return 1
elif n == 3:
return 2
elif n % 3 == 0:
return int(math.pow(3, n // 3))
elif n % 3 == 1:
return 2 * 2 * int(math.pow(3, (n - 4) // 3))
else:
return 2 * int(math.pow(3, n // 3))
# @lc code=end
| [
"chenxu@Chens-iMac.local"
] | chenxu@Chens-iMac.local |
024133573c36b462e604a560f436aea52c5c3ff9 | 9de7a7a7474c655a12917927ab3a97be4383850f | /abricate.py | 1147fda84424234e85d39af4a058c545464c4f73 | [] | no_license | gopel/clonalpop | ca5fc1d03c8dfc575f5bc18404595c28f645c92b | 13b55d85858d783b3a04cbdcb41bfc5aa9b2a512 | refs/heads/master | 2020-05-04T19:04:55.302064 | 2019-04-04T15:33:39 | 2019-04-04T15:33:39 | 179,378,638 | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 26,763 | py | # -*-coding:Latin-1 -*
import os
def abricate(output_path, element) :
'''os.makedirs(output_path + "/" + element + "/Abricate", exist_ok=True)'''
os.system("docker run replikation/abricate --db card " + output_path + "/" + element + "/Prokka/" + element + ".fna > " + output_path + "/" + element + "/Abricate/" + element + "_AntibioRes_CARD.txt")
os.system("abricate --db resfinder " + output_path + "/" + element + "/Prokka/" + element + ".fna > " + output_path + "/" + element + "/Abricate/" + element + "_AntimicRes_ResFinder.txt")
os.system("abricate --db ncbi " + output_path + "/" + element + "/Prokka/" + element + ".fna > " + output_path + "/" + element + "/Abricate/" + element + "_AntimicRes_NCBI.txt")
os.system("abricate --db ecoli_vf " + output_path + "/" + element + "/Prokka/" + element + ".fna > " + output_path + "/" + element + "/Abricate/" + element + "_Virulence_ECVF.txt")
os.system("abricate --db vfdb " + output_path + "/" + element + "/Prokka/" + element + ".fna > " + output_path + "/" + element + "/Abricate/" + element + "_Virulence_VFDB.txt")
os.system("abricate --db plasmidfinder " + output_path + "/" + element + "/Prokka/" + element + ".fna > " + output_path + "/" + element + "/Abricate/" + element + "_Plasmids_PlasmidFinder.txt")
#abricate(letter_illumina)
# Faire apparaitre les gines puis faireune matrice d'analyse presence absence
# 1 gros tableau bourrin avec toutes les infos comme prevu au debut
# Plusieurs slides apres av
def bacteria_resistance(file) :
    """Parse an abricate CARD resistance report (whitespace separated columns).

    The first 13 lines (header noise) and the trailing empty line are skipped.
    Returns a 3-tuple:
      * one string with a gene name per line,
      * a list of readable "Gene: ..., Protein: ..." strings,
      * a list of [gene, product] pairs.
    """
    with open(file, "r") as report:
        rows = report.read().split('\n')[13:-1]
    gene_result = ""
    protein_result = []
    list_protein_result = []
    for row in rows:
        fields = row.split()
        gene = fields[4]
        # coverage / identity are parsed (and so validated) exactly as before,
        # even though the trust coefficient derived from them was never used
        coverage = float(fields[8])
        identity = float(fields[9])
        # the product description is everything from column 13 onwards
        product = ""
        for piece in fields[12:]:
            product += piece + " "
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_virulence_ECVF(file) :
    """Parse an abricate ecoli_vf virulence report (whitespace separated).

    Skips the 13 header lines and the trailing empty line, then returns
    (gene names joined with newlines, readable entry list, [gene, product] pairs).
    """
    with open(file, "r") as report:
        data_rows = report.read().split('\n')[13:-1]
    genes_text = ""
    readable = []
    pairs = []
    for data_row in data_rows:
        cols = data_row.split()
        gene = cols[4]
        # parsed for validation only; the derived trust value was never used
        coverage = float(cols[8])
        identity = float(cols[9])
        product = "".join(c + " " for c in cols[12:])
        genes_text += gene + "\n "
        readable.append("Gene: " + gene + ", Protein: " + product + " \n ")
        pairs.append([gene, product])
    return (genes_text, readable, pairs)
def bacteria_virulence_VDFB(file) :
    """Parse an abricate VFDB virulence report (whitespace separated columns).

    Skips the 13 header lines and the trailing empty line, then returns
    (gene names joined with newlines, readable entry list, [gene, product] pairs).
    """
    handle = open(file, "r")
    rows = handle.read().split('\n')
    handle.close()
    names = ""
    entries = []
    pairs = []
    for row in rows[13:-1]:
        cols = row.split()
        gene = cols[4]
        coverage = float(cols[8])    # validated but otherwise unused (as before)
        identity = float(cols[9])
        product = ""
        for word in cols[12:]:
            product = product + word + " "
        names = names + gene + "\n "
        entries.append("Gene: " + gene + ", Protein: " + product + " \n ")
        pairs.append([gene, product])
    return (names, entries, pairs)
def bacteria_PlasmidFinder(file) :
    """Parse an abricate PlasmidFinder report (tab separated).

    Tabs and newlines are both treated as separators, flattening the report
    into one token stream; the first 13 tokens (header row) and the trailing
    empty token are dropped, and every complete group of 13 tokens is one hit.
    Returns (gene-name string, readable entry list, [gene, product] pairs).
    """
    with open(file, "r") as report:
        raw = report.read()
    tokens = []
    for line in raw.split('\n'):
        tokens.extend(line.split('\t'))
    tokens = tokens[13:-1]
    gene_result = ""
    protein_result = []
    list_protein_result = []
    # only complete 13-column records are processed (incomplete tails ignored)
    for start in range(0, 13 * (len(tokens) // 13), 13):
        hit = tokens[start:start + 13]
        gene = hit[4]
        coverage = float(hit[8])   # parsed for validation only (kept from original)
        identity = float(hit[9])
        product = hit[12] + " "
        gene_result += gene + "\n "
        protein_result.append("Gene: " + gene + ", Protein: " + product + " \n ")
        list_protein_result.append([gene, product])
    return (gene_result, protein_result, list_protein_result)
def bacteria_AntimicRes_ResFinder(file) :
    """Parse an abricate ResFinder report (tab separated).

    Tabs and newlines both act as separators; the 13 header tokens and the
    trailing empty token are discarded, and each remaining complete group of
    13 tokens describes one hit.
    Returns (gene-name string, readable entry list, [gene, product] pairs).
    """
    fh = open(file, "r")
    text = fh.read()
    fh.close()
    queue = []
    for line in text.split('\n'):
        queue.extend(line.split('\t'))
    queue = queue[13:-1]
    gene_block = ""
    descriptions = []
    gene_product_pairs = []
    while len(queue) >= 13:
        record, queue = queue[:13], queue[13:]
        gene = record[4]
        coverage = float(record[8])   # parsed for validation, result unused (as before)
        identity = float(record[9])
        product = record[12] + " "
        gene_block += gene + "\n "
        descriptions.append("Gene: " + gene + ", Protein: " + product + " \n ")
        gene_product_pairs.append([gene, product])
    return (gene_block, descriptions, gene_product_pairs)
def bacteria_AntimicRes_NCBI(file) :
    """Parse an abricate NCBI AMR report (tab separated).

    Flattens the report into one token stream (tabs and newlines are both
    separators), drops the 13 header tokens and the trailing empty token, and
    treats every complete group of 13 tokens as one hit.
    Returns (gene-name string, readable entry list, [gene, product] pairs).
    """
    with open(file, "r") as fh:
        lines = fh.read().split('\n')
    stream = []
    for ln in lines:
        stream += ln.split('\t')
    body = stream[13:-1]
    usable = len(body) - len(body) % 13   # keep only complete 13-column records
    gene_text = ""
    readable = []
    pairs = []
    for hit in (body[i:i + 13] for i in range(0, usable, 13)):
        gene = hit[4]
        coverage = float(hit[8])  # validation only; derived trust value was never used
        identity = float(hit[9])
        product = hit[12] + " "
        gene_text += gene + "\n "
        readable.append("Gene: " + gene + ", Protein: " + product + " \n ")
        pairs.append([gene, product])
    return (gene_text, readable, pairs)
## REGLER CA AUSSI
def extracting_everything_abricate(output_path, acces_dossier_compare) :
    """Collect, for every sample, the gene-name summaries of all six abricate screens.

    Returns a list whose first element is the header (column names) and whose
    following elements are, per sample, the six newline-separated gene strings
    (element [0] of each parser's result), in the same column order.
    """
    # parser / report-suffix pairs, in the same order as the header row below
    screens = [
        (bacteria_resistance, "_AntibioRes_CARD.txt"),
        (bacteria_virulence_ECVF, "_Virulence_ECVF.txt"),
        (bacteria_virulence_VDFB, "_Virulence_VFDB.txt"),
        (bacteria_PlasmidFinder, "_Plasmids_PlasmidFinder.txt"),
        (bacteria_AntimicRes_ResFinder, "_AntimicRes_ResFinder.txt"),
        (bacteria_AntimicRes_NCBI, "_AntimicRes_NCBI.txt"),
    ]
    gene_list = [['resistance','virulence_ECVF','virulence_VDFB','PlasmidFinder','AntimicRes_ResFinder','AntimicRes_NCBI']]
    for element in acces_dossier_compare:
        prefix = output_path + "/" + element + "/Abricate/" + element
        gene_list.append([parse(prefix + suffix)[0] for parse, suffix in screens])
    return gene_list
'''
# Feuille generale
#extracting_everything_abricate()
total_abricate = abricate.extracting_everything_abricate()
total_abricate = total_abricate[1:]
for element in letter_illumina:
sample_ID = ID + element
feuil3.write(k, 0, sample_ID, style_cells)
for element in total_abricate:
print(element)
#feuil3.write(k, 1, element[0], style_cells)
k+=1
'''
#print(extracting_everything_abricate())
# Liste avec tous les echantillons, dans chaque sous-liste 6 listes (on garde les 6)
# Liste interessante : list_protein_result, chaque element = une liste de deux elements (on garde la 2 (troisieme)
# [gene, product] peut etre garder juste gene et ajouter '\n' à chaque fois (on garde le 0 (premier))
### Extraire des donnes pour lindex des proteines (termine)
def extracting_data_for_protein_index(acces_dossier_compare, output_path):
    """Build, per abricate database, one de-duplicated string of protein entries.

    For every sample, each of the six reports is parsed and the readable
    "Gene: ..., Protein: ..." entries (index 1 of each parser result) are
    concatenated, keeping only the first occurrence of each entry string.

    Returns six strings, in the order:
    CARD, Virulence_ECVF, Virulence_VFDB, PlasmidFinder, ResFinder, NCBI.
    """
    # Each report is parsed with the parser written for its format.  The
    # original code ran bacteria_resistance (whitespace split) on every file,
    # which mis-parses the tab-separated PlasmidFinder / ResFinder / NCBI
    # reports; the commented-out code and extracting_data_for_protein_index_2
    # show this per-database pairing was the intent.
    screens = [
        (bacteria_resistance, "_AntibioRes_CARD.txt"),
        (bacteria_virulence_ECVF, "_Virulence_ECVF.txt"),
        (bacteria_virulence_VDFB, "_Virulence_VFDB.txt"),
        (bacteria_PlasmidFinder, "_Plasmids_PlasmidFinder.txt"),
        (bacteria_AntimicRes_ResFinder, "_AntimicRes_ResFinder.txt"),
        (bacteria_AntimicRes_NCBI, "_AntimicRes_NCBI.txt"),
    ]
    merged = ["", "", "", "", "", ""]
    for element in acces_dossier_compare:
        prefix = output_path + "/" + element + "/Abricate/" + element
        for idx, (parse, suffix) in enumerate(screens):
            for entry in parse(prefix + suffix)[1]:
                # keep the first occurrence only (substring test, as before)
                if entry not in merged[idx]:
                    merged[idx] += entry
    return merged[0], merged[1], merged[2], merged[3], merged[4], merged[5]
#print(extracting_data_for_protein_index()[2])
def extracting_data_for_protein_index_2(output_path, acces_dossier_compare) :
    """Like extracting_data_for_protein_index, but collects the [gene, product]
    pairs (index 2 of each parser's result), de-duplicated across samples.

    Returns six lists, in the order:
    CARD, Virulence_ECVF, Virulence_VFDB, PlasmidFinder, ResFinder, NCBI.
    """
    # parser / report-suffix pairs, one per abricate database, in return order
    screens = [
        (bacteria_resistance, "_AntibioRes_CARD.txt"),
        (bacteria_virulence_ECVF, "_Virulence_ECVF.txt"),
        (bacteria_virulence_VDFB, "_Virulence_VFDB.txt"),
        (bacteria_PlasmidFinder, "_Plasmids_PlasmidFinder.txt"),
        (bacteria_AntimicRes_ResFinder, "_AntimicRes_ResFinder.txt"),
        (bacteria_AntimicRes_NCBI, "_AntimicRes_NCBI.txt"),
    ]
    buckets = [[], [], [], [], [], []]
    for element in acces_dossier_compare:
        prefix = output_path + "/" + element + "/Abricate/" + element
        for bucket, (parse, suffix) in zip(buckets, screens):
            for pair in parse(prefix + suffix)[2]:
                # first-occurrence order across samples is preserved
                if pair not in bucket:
                    bucket.append(pair)
    return buckets[0], buckets[1], buckets[2], buckets[3], buckets[4], buckets[5]
'''
protein_list_AntibioRes_CARD , protein_list_Virulence_ECVF , protein_list_Virulence_VFDB, protein_list_Plasmids_PlasmidFinder,protein_list_AntimicRes_ResFinder,protein_list_AntimicRes_NCBI = extracting_data_for_protein_index_2()
### Combining reports across samples
def abricate_report_across_samples(letter_illumina) :
# for element in letter_illumina:
# sample_ID = ID + element
# try:
# os.mkdir("/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/")
# except OSError:
# pass
# os.system("abricate --db card /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntibioRes_CARD.tab")
# os.system("abricate --db resfinder /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_ResFinder.tab")
# os.system("abricate --db ncbi /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_NCBI.tab")
# os.system("abricate --db ecoli_vf /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_ECVF.tab")
# os.system("abricate --db vfdb /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_VFDB.tab")
# os.system("abricate --db plasmidfinder /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Prokka/" + sample_ID + "_illumina_prokka.fna > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Plasmids_PlasmidFinder.tab")
sentence_AntibioRes_CARD =''
sentence_AntimicRes_ResFinder =''
sentence_AntimicRes_NCBI =''
sentence_Virulence_ECVF =''
sentence_Virulence_VFDB = ''
sentence_Plasmids_PlasmidFinder =''
for element in letter_illumina:
sample_ID = ID + element
sentence_AntibioRes_CARD += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntibioRes_CARD.tab "
sentence_AntimicRes_ResFinder += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_ResFinder.tab "
sentence_AntimicRes_NCBI += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_AntimicRes_NCBI.tab "
sentence_Virulence_ECVF += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_ECVF.tab "
sentence_Virulence_VFDB += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Virulence_VFDB.tab "
sentence_Plasmids_PlasmidFinder += "/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/" + sample_ID + "/Abricate/" + sample_ID + "_Plasmids_PlasmidFinder.tab "
try:
os.mkdir("/Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples")
except OSError:
pass
os.system("abricate --summary " + sentence_AntibioRes_CARD +" > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/AntibioRes_CARD_report_samples.txt")
os.system("abricate --summary " + sentence_AntimicRes_ResFinder + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/AntimicRes_ResFinder_report_samples.txt")
os.system("abricate --summary " + sentence_AntimicRes_NCBI + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/AntimicRes_NCBI_report_samples.txt")
os.system("abricate --summary " + sentence_Virulence_ECVF + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/Virulence_ECVF_report_samples.txt")
os.system("abricate --summary " + sentence_Virulence_VFDB + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/Virulence_VFDB_report_samples.txt")
os.system("abricate --summary " + sentence_Plasmids_PlasmidFinder + " > /Users/Yanis/Desktop/Projet_de_recherche/Genomes/enfant_" + str(child) + "/Abricate_Reports_across_samples/Plasmids_PlasmidFinder_report_samples.txt")
abricate_report_across_samples(letter_illumina)'''
# Extracting phrases report samples
def extracting_report_samples(file):
fichier = open(file, "r")
contenu = fichier.read()
phrases = []
for phrase in contenu.split('\n') :
phrases.append(phrase)
#print(phrases)
new_phrases = []
for element in phrases :
new_phrases.append(element.split('\t'))
n = len(new_phrases[0])
return(new_phrases, n) | [
"noreply@github.com"
] | noreply@github.com |
6c3f8ad91c11294558986e5612928dcb59119e90 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/303/usersdata/281/81893/submittedfiles/testes.py | 9d5ad8d30fc63ed816896c55f3d77b98a8e9722a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 157 | py | # -*- coding: utf-8 -*-
# START HERE BELOW (translated from the Portuguese "COMECE AQUI ABAIXO")
# Read one integer from the user (the prompt "Digite um número" means "enter a number").
x=int(input('Digite um número:'))
# NOTE(review): the loop body never changes x, so any x in (0, 13] loops forever
# printing 'Ok' — presumably an unfinished exercise; confirm the intended behavior.
while x>0 and x<=13:
    print('Ok')
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
855c082aa1c28384a3ca3f6688c7cd52583b2287 | 47e93b916a6b55871997bfa95bb2f69676416b00 | /landerdb.py | 0486a4742f580c46200c8342d154cb857fb29434 | [] | no_license | Inqre/Melody | dcc88acb83b23a3c0786ab5b9529b1dcd71f6ece | 84f298e5446f53c5f3fededd9f2920552db74c87 | refs/heads/master | 2020-05-15T22:32:28.959905 | 2013-11-08T02:45:06 | 2013-11-08T02:45:06 | 14,127,017 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,807 | py | import json
import os
__version__ = "1.0.0"
class Connect:
    """Tiny JSON-file backed document store.

    Documents (dicts) are grouped into named collections inside one JSON file.
    The whole file is kept in memory and rewritten on every mutation.

    Fixes over the original:
      * files are opened in text mode ('r'/'w') so json.dump/str writes work
        on Python 3 (the old 'rb'/'wb' modes break there),
      * the ``stale`` flag is cleared after a load, so repeated ``find`` calls
        no longer re-read the file unless a change occurred (this is what the
        original comment promised but never did),
      * the bare ``except:`` is narrowed to the errors a load can raise,
      * ``find`` with a multi-key query now requires ALL keys to match and
        returns each entry at most once (the original appended the entry once
        per matching key).
    """

    def __init__(self, db_file):
        """Open (or create) the database stored in *db_file*."""
        self.db = db_file
        self.json_data = {}
        # True when the in-memory copy may be out of date and must be
        # re-read from disk before the next operation.
        self.stale = True
        if not os.path.exists(self.db):
            self._save()

    def _load(self):
        """Refresh self.json_data from disk if it may be stale."""
        if self.stale:
            try:
                with open(self.db, 'r') as fp:
                    self.json_data = json.load(fp)
            except (OSError, ValueError):
                # missing/corrupt/empty file: rewrite it with the current data
                self._save()
            self.stale = False

    def _save(self):
        """Write the in-memory data to disk."""
        with open(self.db, 'w') as fp:
            json.dump(self.json_data, fp)
        # force one re-read after a write, as the original did
        self.stale = True

    def insert(self, collection, data):
        """Append *data* to *collection*, creating the collection on first use."""
        self._load()
        self.json_data.setdefault(collection, []).append(data)
        self._save()

    def remove(self, collection, data):
        """Remove the first entry equal to *data*.

        Returns False if the collection does not exist; raises ValueError if
        the entry is not present (unchanged from the original behavior).
        """
        self._load()
        if collection not in self.json_data:
            return False
        self.json_data[collection].remove(data)
        self._save()

    def find(self, collection, data):
        """Return the entries of *collection* matching every key/value of *data*.

        Pass the string "all" to get every entry.  Entries lacking one of the
        queried keys are skipped.  Returns False if the collection is missing.
        """
        self._load()
        if collection not in self.json_data:
            return False
        output = []
        for entry in self.json_data[collection]:
            if data == "all":
                output.append(entry)
                continue
            try:
                if all(entry[key] == value for key, value in data.items()):
                    output.append(entry)
            except KeyError:
                # entry lacks a queried key: not a match
                continue
        return output
| [
"max00355@gmail.com"
] | max00355@gmail.com |
8074d9f48b99a19a25b95da45d02787fb65ed44d | 771247a4498d50745c5fbff09e7446ea9213ab19 | /Py8/export_openweather.py | a80a7c5c48213f7a13b051fcbfb593a6a75dd25e | [] | no_license | ostrowsky/Parcer | 42697f9a98f42c8220675d540e8dc2a95855783e | f953b7cbb6b948df894950ee7ed804fcd6b8e811 | refs/heads/master | 2021-01-21T06:39:46.184872 | 2017-06-23T16:07:15 | 2017-06-23T16:07:15 | 91,581,143 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,845 | py | """ OpenWeatherMap (экспорт)
Сделать скрипт, экспортирующий данные из базы данных погоды,
созданной скриптом openweather.py. Экспорт происходит в формате CSV или JSON.
Скрипт запускается из командной строки и получает на входе:
export_openweather.py --csv filename [<город>]
export_openweather.py --json filename [<город>]
export_openweather.py --html filename [<город>]
При выгрузке в html можно по коду погоды (weather.id) подтянуть
соответствующие картинки отсюда: http://openweathermap.org/weather-conditions
Экспорт происходит в файл filename.
Опционально можно задать в командной строке город. В этом случае
экспортируются только данные по указанному городу. Если города нет в базе -
выводится соответствующее сообщение.
"""
import sys
import sqlite3
# SQLite database produced by openweather.py
db_filename = 'db_weather.sqlite'
# debugging aid left by the author: a hard-coded argv for local runs
#sys.argv = ['export_openweather.py', 'weather.html', 'MX']
# Command line: export_openweather.py <output file> <country> [<city>]
try:
    filename = sys.argv[1]
    country = sys.argv[2]
except IndexError:
    # NOTE(review): after this message the script keeps running, so later
    # references to `filename`/`country` raise NameError — a sys.exit() here
    # would be cleaner; confirm intended behavior before changing.
    print("Задан неверный параметр. Файл должен быть запущен с указанием параметров: export_openweather.py filename [<город>]")
print(sys.argv)
# Static beginning of the HTML report: header row with one <th> per column
# selected from the weather table below (column names are in Russian).
html_string = '''
<!DOCTYPE html>
<html>
<head>
    <title>Weather</title>
</head>
<body>
<h1>Погода на момент актуализации базы данных</h1>
<table border = "1">
    <tbody>
        <tr>
            <th align="center" width="auto">id_города</th>
            <th align="center" width="auto">Город</th>
            <th align="center" width="auto">Страна</th>
            <th align="center" width="auto">Дата</th>
            <th align="center" width="auto">Температура</th>
            <th align="center" width="auto">id_погоды</th>
            <th align="center" width="auto">Значок</th>
        </tr>
'''
# Two positional args (filename + country): export every city of the country.
if len(sys.argv) == 3:
    with sqlite3.connect(db_filename) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute('''
        select distinct id_города, Город, Страна, Дата, Температура, id_погоды, Значок
        from weather
        where Страна = ?''', (country,))
        db_rows = cur.fetchall()
        cities = list(db_rows)
        # One <tr> per row; the last selected column is the weather icon code.
        for city in cities:
            #print(list(city))
            if city:
                #print(city)
                #print(list(city))
                html_string += '\t<tr>\n'
                for k in list(city):
                    # NOTE(review): this compares cell VALUES, so any earlier
                    # cell equal to the icon code is also rendered as an image.
                    if k == list(city)[-1]:
                        # icon images are fetched from openweathermap by code
                        path = "http://openweathermap.org/img/w/" + str(k) + ".png"
                        html_string += '\t\t<td align="center" width="auto"><img src=' + path + '></td>\n'
                    else:
                        html_string += '\t\t<td align="center" width="auto">' + str(k) + '</td>\n'
                html_string += '\t</tr>\n'
            else:
                # "No cities of the given country in the database"
                print("Города указанной страны отсутствуют в базе")
    # close the table and document
    html_string += '''
    </tbody>
    </table>
</body>
</html>'''
# Three positional args (filename + country + city): export one city only.
elif len(sys.argv) == 4:
    city = sys.argv[3]
    with sqlite3.connect(db_filename) as conn:
        conn.row_factory = sqlite3.Row
        cur = conn.cursor()
        cur.execute('''
        select distinct id_города, Город, Страна, Дата, Температура, id_погоды, Значок
        from weather
        where Город = ? and Страна = ?''', (city, country,))
        db_rows = cur.fetchall()
        cities = list(db_rows)
        # NOTE(review): `city` is rebound by this loop, shadowing the argv value.
        for city in cities:
            # print(list(city))
            if city:
                # print(city)
                # print(list(city))
                html_string += '\t<tr>\n'
                for k in list(city):
                    # same value-comparison caveat as the country branch above
                    if k == list(city)[-1]:
                        path = "http://openweathermap.org/img/w/" + str(k) + ".png"
                        html_string += '\t\t<td align="center" width="auto"><img src=' + path + '></td>\n'
                    else:
                        html_string += '\t\t<td align="center" width="auto">' + str(k) + '</td>\n'
                html_string += '\t</tr>\n'
            else:
                # "City not present in the database"
                print("Город отсутствует в базе")
    # close the table and document
    html_string += '''
    </tbody>
    </table>
</body>
</html>'''
# Write the report.  The file is opened with an explicit UTF-8 encoding, so no
# manual pre-encoding is needed (the old `encoded_str` byte string was dead
# code — computed but never used — and has been removed).
with open(filename, 'w', encoding='UTF-8') as f:
    f.write(html_string)
| [
"ostrowskyi@gmail.com"
] | ostrowskyi@gmail.com |
05ffc138a8dfcb6c084d4ff20b53ae4b7261b8b4 | 26a97032622f10c47e1961ded98023f2daf539d2 | /src/customers/forms.py | b550242183c540ec43a371955878f58d0ce823dc | [] | no_license | mycota/laundry_MS | 7ada777bc4a6cd746152b44b7257064db8465beb | ab41a70202717957b694152590b72a52d0fb1bff | refs/heads/master | 2023-06-02T15:10:43.466619 | 2021-06-22T02:25:49 | 2021-06-22T02:25:49 | 379,100,888 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,093 | py | from django import forms
from django.contrib.auth.models import User
from .models import Customers
class AddCustomerForm(forms.ModelForm):
    """ModelForm for creating a Customers record.

    The stored value of the second gender choice keeps the historical
    misspelling 'Famale' so existing database rows still validate; only the
    label shown to users was corrected to 'Female'.
    """
    # (stored value, human-readable label) pairs for the gender dropdown
    gend = (('Male', 'Male'), ('Famale', 'Female'),)
    cust_name = forms.CharField(max_length=70)
    cust_phone = forms.CharField(max_length=10)
    # NOTE(review): plain CharField — consider forms.EmailField for validation,
    # once the impact on existing data has been checked.
    cust_email = forms.CharField(max_length=100)
    cust_address = forms.CharField(widget=forms.Textarea,max_length=225)
    cust_gender = forms.ChoiceField(choices=gend)
    # balance = forms.FloatField()

    class Meta:
        model = Customers
        fields = ['cust_name', 'cust_phone', 'cust_email', 'cust_address', 'cust_gender']
class UpdateCustomerForm(forms.ModelForm):
gend = (('Male', 'Male'), ('Famale', 'Famale'),)
cust_name = forms.CharField(max_length=70)
cust_phone = forms.CharField(max_length=10)
cust_email = forms.CharField(max_length=100)
cust_address = forms.CharField(widget=forms.Textarea,max_length=225)
cust_gender = forms.ChoiceField(choices=gend)
# balance = forms.FloatField()
class Meta:
model = Customers
fields = ['cust_name', 'cust_phone', 'cust_email', 'cust_address', 'cust_gender'] | [
"universaltechsolutionsuts@gmail.com"
] | universaltechsolutionsuts@gmail.com |
85c8b2f42aed216a99f935dec957f601a6e4c545 | b2521e5fa0b0e59bddbdafd5b3b96d8ad3198379 | /GameOfThrones_partI.py | edb89eb1ecc78e0b6bda083a19fce97f5b5ee8ef | [] | no_license | llpyyz/HackerRank_Warmup | aa0db25cdce4fe9b4899033dc9fda295e7bddbb2 | b9628306e684aaed1673305a5256433e317c5cc0 | refs/heads/master | 2021-01-19T13:53:07.155099 | 2015-01-16T23:37:28 | 2015-01-16T23:37:28 | 29,372,468 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | """
David Schonberger
Hackerrank.com
Warmpup - Game of Thrones - I
1/4/2015
"""
def chr_count(c, s):
    """Return the number of occurrences of character *c* in *s*."""
    # str.count does the same scan in C without building a throwaway list
    return s.count(c)
# Decide whether the input string's characters can be rearranged into a
# palindrome: an even-length string needs zero characters with odd counts,
# an odd-length string needs exactly one.
# NOTE: raw_input() means this script targets Python 2.
string = raw_input()
found = False
# sorted list of characters (the sort is not actually needed for counting)
l = [ch for ch in string]
l.sort()
chr_set = set(l)
if(len(l) % 2 == 0):
    # even length: no character may appear an odd number of times
    if(sum([chr_count(c,l) % 2 == 1 for c in chr_set]) == 0):
        found = True
else:
    # odd length: exactly one character appears an odd number of times
    if(sum([chr_count(c,l) % 2 == 1 for c in chr_set]) == 1):
        found = True
if not found:
    print("NO")
else:
    print("YES")
| [
"llp_yyz@hotmail.com"
] | llp_yyz@hotmail.com |
a2a2275184e0dde13affe5fbe7484ad6d9b28750 | e3ecb87551f72c201fe6a9fbff772614cfb5ed4c | /mnist_qkeras2.py | ed5c2fb01e719d5efa76e2ecf5c08950db147fed | [
"MIT"
] | permissive | filipemlins/nas-hls4ml | 6cccdc7c061a2d1071e1328e5121aa4038b8fedd | b35afc4f684d803d352776c40f3a6cbbf47c4b1c | refs/heads/main | 2023-03-12T23:11:35.316667 | 2021-03-03T02:09:02 | 2021-03-03T02:09:02 | 343,616,339 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,792 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 2 19:57:08 2020
@author: filipe
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 16:44:49 2020
@author: filipe
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 15:05:57 2020
@author: filipe
"""
from tensorflow.keras.utils import to_categorical
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
## Pre-processing: load the Kaggle MNIST CSVs and build padded image tensors.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')

Y_train1 = train[['label']]
# Drop the label column so only the 784 pixel columns remain.
X_train1 = train.drop(train.columns[[0]], axis=1)
X_test1 = test

X_train1 = np.array(X_train1)
X_test1 = np.array(X_test1)

# Reshape the training and test set to 28x28x1 images scaled to [0, 1].
X_train1 = X_train1.reshape(X_train1.shape[0], 28, 28, 1)/255
X_test1 = X_test1.reshape(X_test1.shape[0], 28, 28, 1)/255

# Padding the images by 2 pixels since in the paper input images were 32x32
# NOTE(review): X_test1 (the unlabeled competition set) is padded here but
# never used again below — the evaluation uses the hold-out split; confirm.
X_train1 = np.pad(X_train1, ((0,0),(2,2),(2,2),(0,0)), 'constant')
X_test1 = np.pad(X_test1, ((0,0),(2,2),(2,2),(0,0)), 'constant')

# Hold out 20% of the labelled data for local evaluation (fixed seed).
X_train, X_test, Y_train, Y_test = train_test_split(X_train1, Y_train1, test_size=0.2, random_state=42)

# Standardization: zero mean / unit variance using training-set statistics.
mean_px = X_train.mean().astype(np.float32)
std_px = X_train.std().astype(np.float32)
X_train = (X_train - mean_px)/(std_px)

# One-hot encoding the labels. Y_test is deliberately left as integer
# labels: accuracy_score at the end of the script compares against argmax.
Y_train = to_categorical(Y_train)

print(X_train.shape[0], "train samples")
print(X_test.shape[0], "test samples")

#scaler = StandardScaler().fit(X_train)
#X_train = scaler.transform(X_train)
#X_test = scaler.transform(X_test)
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l1
from callbacks import all_callbacks
from tensorflow.keras.layers import Activation, MaxPooling2D, Flatten
from qkeras.qlayers import QDense, QActivation
from qkeras.qconvolutional import QConv2D
from qkeras.quantizers import quantized_bits, quantized_relu
# LeNet-5-style CNN built from QKeras quantized layers: every weight, bias
# and ReLU activation is quantized to 7 bits with 1 integer bit so the
# trained model can be converted to fixed-point firmware by hls4ml.
model = Sequential()

# Conv block 1: 8 filters of 4x4 on the 32x32x1 padded input.
model.add(QConv2D(8, (4, 4), strides=(1,1), input_shape=(32,32, 1),
                  kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1), name="conv2d_0_m"))
model.add(QActivation(activation=quantized_relu(7,1), name='relu1'))
model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2), name='max1'))

# Conv block 2: 16 filters of 2x2.
model.add(QConv2D(
    16, (2, 2), strides=(1,1),
    kernel_quantizer=quantized_bits(7,1),
    bias_quantizer=quantized_bits(7,1),
    name="conv2d_1_m"))
model.add(QActivation(activation=quantized_relu(7,1), name='relu2'))
model.add(MaxPooling2D(pool_size = (2,2), strides = (2,2), name='max2'))
model.add(Flatten())

# Fully-connected head (120 -> 84 -> 10), L1-regularized.
model.add(QDense(120, name='fc1',
                 kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1),
                 kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001)))
model.add(QActivation(activation=quantized_relu(7,1), name='relu3'))
model.add(QDense(84, name='fc2',
                 kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1),
                 kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001)))
model.add(QActivation(activation=quantized_relu(7,1), name='relu4'))
model.add(QDense(10, name='output',
                 kernel_quantizer=quantized_bits(7,1), bias_quantizer=quantized_bits(7,1),
                 kernel_initializer='lecun_uniform', kernel_regularizer=l1(0.0001)))
# Softmax stays a plain float Keras activation; hls4ml implements it with
# lookup tables configured later in this script.
model.add(Activation(activation='softmax', name='softmax'))

# Pruning experiment, currently disabled:
#from tensorflow_model_optimization.python.core.sparsity.keras import prune, pruning_callbacks, pruning_schedule
#from tensorflow_model_optimization.sparsity.keras import strip_pruning
#pruning_params = {"pruning_schedule" : pruning_schedule.ConstantSparsity(0.75, begin_step=2000, frequency=100)}
#model = prune.prune_low_magnitude(model, **pruning_params)
# Toggle: train a new model (True) or load a saved checkpoint (False).
# NOTE(review): this rebinds the name `train`, which earlier held the
# training DataFrame — harmless here since the DataFrame is no longer
# used, but worth renaming.
train = True
import keras
if train:
    adam = Adam(lr=0.0001)
    model.compile(optimizer=adam, loss=keras.losses.categorical_crossentropy, metrics=['accuracy'])
#    callbacks = all_callbacks(stop_patience = 1000,
#                              lr_factor = 0.5,
#                              lr_patience = 10,
#                              lr_epsilon = 0.000001,
#                              lr_cooldown = 2,
#                              lr_minimum = 0.0000001,
#                              outputDir = 'model_3')
#    callbacks.callbacks.append(pruning_callbacks.UpdatePruningStep())
    # validation_split carves a further 25% out of X_train for monitoring;
    # the earlier hold-out (X_test) stays untouched until final evaluation.
    model.fit(X_train, Y_train, batch_size=1024,
              epochs=10, validation_split=0.25, shuffle=True)#, callbacks = callbacks.callbacks)
    # Save the model again but with the pruning 'stripped' to use the regular layer types
    # model = strip_pruning(model)
    model.save('model_4/KERAS_check_best_model.h5')
else:
    from tensorflow.keras.models import load_model
    from qkeras.utils import _add_supported_quantized_objects
    # QKeras layers are custom objects; register them so load_model can
    # deserialize the quantized checkpoint.
    co = {}
    _add_supported_quantized_objects(co)
    model = load_model('model_4/KERAS_check_best_model.h5', custom_objects=co)
import hls4ml

# Round and saturate activation outputs (instead of truncate/wrap) to keep
# fixed-point quantization error bounded at the type limits.
hls4ml.model.optimizer.OutputRoundingSaturationMode.layers = ['Activation']
hls4ml.model.optimizer.OutputRoundingSaturationMode.rounding_mode = 'AP_RND'
hls4ml.model.optimizer.OutputRoundingSaturationMode.saturation_mode = 'AP_SAT'

# Per-layer config: 8-bit fixed point (2 integer bits) everywhere by default.
config = hls4ml.utils.config_from_keras_model(model, granularity='name',
                                              default_precision='ap_fixed<8,2,AP_RND,AP_SAT>', default_reuse_factor=30000)
# Softmax lookup tables need wider types than the 8-bit default.
config['LayerName']['softmax']['exp_table_t'] = 'ap_fixed<18,8>'
config['LayerName']['softmax']['inv_table_t'] = 'ap_fixed<18,4>'
print(config)

hls_model = hls4ml.converters.convert_from_keras_model(model, hls_config=config, output_dir='model_4/hls4ml_prj')
hls_model.compile()

import plotting
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from tensorflow.keras.models import load_model
#model_ref = load_model('model_1/KERAS_check_best_model.h5')

# Compare the QKeras model against its hls4ml fixed-point emulation on the
# hold-out split (Y_test holds integer labels, so compare against argmax).
print("Accuracy quantized: {}".format(accuracy_score(Y_test, np.argmax(model.predict(X_test), axis=1))))
z = np.argmax(hls_model.predict(X_test), axis=1)
print("Accuracy hls4ml: {}".format(accuracy_score(Y_test, z)))
#print("Accuracy unpruned: {}".format(accuracy_score(np.argmax(y_test, axis=1), np.argmax(model_ref.predict(X_test), axis=1))))

# ROC plotting and Vivado synthesis, currently disabled:
#plt.figure(figsize=(9, 9))
#_ = plotting.makeRoc(X_train, Y_train, le.classes_, model)
##plt.gca().set_prop_cycle(None) # reset the colors
##_ = plotting.makeRoc(X_test, y_test, le.classes_, model_ref, linestyle='--')
#plt.gca().set_prop_cycle(None) # reset the colors
#_ = plotting.makeRoc(X_train, Y_train, le.classes_, hls_model, linestyle=':')
#
#hls_model.build(synth=True)
#
#hls4ml.report.read_vivado_report('model_3/hls4ml_prj')
| [
"filipemlins@gmail.com"
] | filipemlins@gmail.com |
f8b8795f039a88f35ab0c597b1aa46c3a393ceb6 | 2557ba8bd6a8621fac5cee27cc7917d28f850ac8 | /tools/validation_rod2021.py | 51831606c7c0e3b1446bfea457fc10722cb0fe26 | [
"MIT"
] | permissive | WanxinT/Balanced-RODNet | 1993a4e5518cad074013a634fad06b1e108a8fd1 | f6c9c5b4696b697254698cce65a97ec2d92c7a3c | refs/heads/main | 2023-03-27T20:19:28.584615 | 2021-03-29T06:07:46 | 2021-03-29T06:07:46 | 352,520,680 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,400 | py | # -*- coding:utf-8 -*-
"""
@author:Zehui Yu
@file: validation_rod2021.py
@time: 2021/01/31
"""
import sys
import os
from cruw import CRUW
from cruw.eval import evaluate_rod2021, evaluate_rod2021_APAR
import argparse
# Example invocation, kept as a no-op module-level string for reference:
"python tools/validation_rod2021.py --config configs/my_config_rodnet_hg1_win16_lovasz_bs16_lr1e5_2020_2_11.py " \
" --checkpoint_name rodnet-hg1-win16-wobg-lovasz_bs16_lr1e5_2020_2_11-20210211-103511"
def parse_args():
    """Parse the command-line options for this script.

    Returns:
        argparse.Namespace with ``config`` and ``checkpoint_name`` attributes.
    """
    arg_parser = argparse.ArgumentParser(description='Test RODNet.')
    arg_parser.add_argument('--config', type=str,
                            help='choose rodnet model configurations')
    arg_parser.add_argument('--checkpoint_name', type=str, default='./data/',
                            help='directory to the prepared data')
    return arg_parser.parse_args()
def eval_rod2021_batch(config_file, checkpoint_name):
    """Run validation for epochs 1..20 of a checkpoint and log AP/AR per epoch.

    For every epoch, shells out to tools/validation.py to produce predictions,
    then scores them against the ground truth with evaluate_rod2021_APAR and
    appends the metrics to a per-checkpoint results file.
    """
    epoch_start, epoch_end = 1, 20
    pkl_idx = list(range(epoch_start, epoch_end + 1))
    for i in pkl_idx:
        # Generate predictions for this epoch's checkpoint on the
        # validation split (paths are hard-coded to the cluster layout).
        cmd = 'python tools/validation.py --config %s \
                --data_dir /nfs/volume-95-8/ROD_Challenge/RODNet/data/zixiang_split/ \
                --valid \
                --checkpoint checkpoints/%s/epoch_%02d_final.pkl' % (config_file, checkpoint_name, i)
        os.system(cmd)
        # Score the freshly written results against the ground truth.
        # NOTE(review): the CRUW dataset object is loop-invariant and could
        # be constructed once outside the loop — confirm before moving.
        data_root = "/nfs/volume-95-8/ROD_Challenge/src_dataset"
        dataset = CRUW(data_root=data_root, sensor_config_name='sensor_config_rod2021')
        submit_dir = '/nfs/volume-95-8/tianwanxin/RODNet/valid_results/%s' % checkpoint_name
        truth_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/for_validation/gt_zixiang_split'
        AP, AR = evaluate_rod2021_APAR(submit_dir, truth_dir, dataset)
        # print('epoch: %d, AP: %.4f, AR: %.4f' % (i, AP, AR))
        # Append this epoch's metrics to the running results file.
        with open('/nfs/volume-95-8/tianwanxin/RODNet/valid_res/%s/valid_res.txt' % checkpoint_name, 'a') as f:
            f.write('epoch: %d, AP: %.4f, AR: %.4f\n' % (i, AP, AR))
if __name__ == '__main__':
    # Earlier ad-hoc single-checkpoint evaluation, kept for reference:
    # data_root = "/nfs/volume-95-8/ROD_Challenge/src_dataset"
    # dataset = CRUW(data_root=data_root, sensor_config_name='sensor_config_rod2021')
    # submit_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/tools/valid_results/rodnet-hg1-win16-wobg-20210206-124028'
    # truth_dir = '/nfs/volume-95-8/ROD_Challenge/RODNet/for_validation/gt_zixiang_split'
    # ap, ar = evaluate_rod2021_APAR(submit_dir, truth_dir, dataset)
    # print(ap, ar)
    args = parse_args()
    eval_rod2021_batch(args.config, args.checkpoint_name)
"noreply@github.com"
] | noreply@github.com |
09c2e1bc21335613f5e925b52bd82f0b8f9d9309 | 741c5c70bf4a0adb05db6b0777c8d07e28eb9cf6 | /lib/python3.4/site-packages/IPython/core/profileapp.py | 2a412589ca0dcc1cdc77a98a58967352a4566bca | [] | no_license | andybp85/hyLittleSchemer | e686d2dc0f9067562367ea1173f275e8e2d2cb85 | af5cb6adf6a196cc346aa7d14d7f9509e084c414 | refs/heads/master | 2021-01-19T07:48:31.309949 | 2015-01-04T00:57:30 | 2015-01-04T00:57:30 | 28,496,304 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,967 | py | # encoding: utf-8
"""
An application for managing IPython profiles.
To be invoked as the `ipython profile` subcommand.
Authors:
* Min RK
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2008 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
from IPython.config.application import Application
from IPython.core.application import (
BaseIPythonApplication, base_flags
)
from IPython.core.profiledir import ProfileDir
from IPython.utils.importstring import import_item
from IPython.utils.path import get_ipython_dir, get_ipython_package_dir
from IPython.utils import py3compat
from IPython.utils.traitlets import Unicode, Bool, Dict
#-----------------------------------------------------------------------------
# Constants
#-----------------------------------------------------------------------------
# --- Help text and usage examples for the `ipython profile` subcommands ---

create_help = """Create an IPython profile by name
Create an ipython profile directory by its name or
profile directory path. Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. Once created, you will
can edit the configuration files in the profile
directory to configure IPython. Most users will create a
profile directory by name,
`ipython profile create myprofile`, which will put the directory
in `<ipython_dir>/profile_myprofile`.
"""

list_help = """List available IPython profiles
List all available profiles, by profile location, that can
be found in the current working directly or in the ipython
directory. Profile directories are named using the convention
'profile_<profile>'.
"""

profile_help = """Manage IPython profiles
Profile directories contain
configuration, log and security related files and are named
using the convention 'profile_<name>'. By default they are
located in your ipython directory. You can create profiles
with `ipython profile create <name>`, or see the profiles you
already have with `ipython profile list`
To get started configuring IPython, simply do:
$> ipython profile create
and IPython will create the default profile in <ipython_dir>/profile_default,
where you can edit ipython_config.py to start configuring IPython.
"""

# Example strings shown in each app's --help output.
_list_examples = "ipython profile list # list all profiles"

_create_examples = """
ipython profile create foo # create profile foo w/ default config files
ipython profile create foo --reset # restage default config files over current
ipython profile create foo --parallel # also stage parallel config files
"""

_main_examples = """
ipython profile create -h # show the help string for the create subcommand
ipython profile list -h # show the help string for the list subcommand
ipython locate profile foo # print the path to the directory for profile 'foo'
"""
#-----------------------------------------------------------------------------
# Profile Application Class (for `ipython profile` subcommand)
#-----------------------------------------------------------------------------
def list_profiles_in(path):
    """Return the profile names found in root directory `path`.

    A profile is any subdirectory named ``profile_<name>``; the
    ``profile_`` prefix is stripped from the returned names.
    """
    found = []
    for entry in os.listdir(path):
        try:
            candidate = os.path.join(path, entry)
        except UnicodeError:
            # Skip entries whose names cannot be joined with `path`.
            continue
        if entry.startswith('profile_') and os.path.isdir(candidate):
            found.append(entry.split('_', 1)[-1])
    return found
def list_bundled_profiles():
    """Return the names of profiles that ship inside the IPython package."""
    bundle_root = os.path.join(get_ipython_package_dir(), u'config', u'profile')
    return [entry for entry in os.listdir(bundle_root)
            if os.path.isdir(os.path.join(bundle_root, entry))
            and entry != "__pycache__"]
class ProfileLocate(BaseIPythonApplication):
    """App for `ipython locate profile`: print the path to a profile directory."""
    description = """print the path to an IPython profile dir"""

    def parse_command_line(self, argv=None):
        """Parse argv, accepting the first positional argument as the profile name."""
        super(ProfileLocate, self).parse_command_line(argv)
        if self.extra_args:
            self.profile = self.extra_args[0]

    def start(self):
        # profile_dir is resolved from self.profile by BaseIPythonApplication.
        print(self.profile_dir.location)
class ProfileList(Application):
    """App for `ipython profile list`: print all discoverable profiles."""
    name = u'ipython-profile'
    description = list_help
    examples = _list_examples

    # Command-line aliases mapping flag names to configurable traits.
    aliases = Dict({
        'ipython-dir' : 'ProfileList.ipython_dir',
        'log-level' : 'Application.log_level',
    })
    flags = Dict(dict(
        debug = ({'Application' : {'log_level' : 0}},
            "Set Application.log_level to 0, maximizing log output."
        )
    ))

    ipython_dir = Unicode(get_ipython_dir(), config=True,
        help="""
        The name of the IPython directory. This directory is used for logging
        configuration (through profiles), history storage, etc. The default
        is usually $HOME/.ipython. This options can also be specified through
        the environment variable IPYTHONDIR.
        """
    )

    def _print_profiles(self, profiles):
        """print list of profiles, indented."""
        for profile in profiles:
            print(' %s' % profile)

    def list_profile_dirs(self):
        """Print bundled profiles, then profiles in ipython_dir, then in the cwd."""
        profiles = list_bundled_profiles()
        if profiles:
            print()
            print("Available profiles in IPython:")
            self._print_profiles(profiles)
            print()
            print(" The first request for a bundled profile will copy it")
            print(" into your IPython directory (%s)," % self.ipython_dir)
            print(" where you can customize it.")

        profiles = list_profiles_in(self.ipython_dir)
        if profiles:
            print()
            print("Available profiles in %s:" % self.ipython_dir)
            self._print_profiles(profiles)

        profiles = list_profiles_in(py3compat.getcwd())
        if profiles:
            print()
            print("Available profiles in current directory (%s):" % py3compat.getcwd())
            self._print_profiles(profiles)

        print()
        print("To use any of the above profiles, start IPython with:")
        print(" ipython --profile=<name>")
        print()

    def start(self):
        """Entry point: list everything."""
        self.list_profile_dirs()
# Flags for `ipython profile create`, derived from the common base flags.
create_flags = {}
create_flags.update(base_flags)
# don't include '--init' flag, which implies running profile create in other apps
create_flags.pop('init')
create_flags['reset'] = ({'ProfileCreate': {'overwrite' : True}},
                        "reset config files in this profile to the defaults.")
create_flags['parallel'] = ({'ProfileCreate': {'parallel' : True}},
                        "Include the config files for parallel "
                        "computing apps (ipengine, ipcontroller, etc.)")
class ProfileCreate(BaseIPythonApplication):
    """App for `ipython profile create`: stage default config files for a profile."""
    name = u'ipython-profile'
    description = create_help
    examples = _create_examples
    # Always create the profile directory if it does not exist.
    auto_create = Bool(True, config=False)

    def _log_format_default(self):
        """Default log format: just the app name and the message."""
        return "[%(name)s] %(message)s"

    def _copy_config_files_default(self):
        # Creating a profile always stages the default config files.
        return True

    parallel = Bool(False, config=True,
        help="whether to include parallel computing config files")

    def _parallel_changed(self, name, old, new):
        """Traitlets observer: add/remove the parallel app config files."""
        parallel_files = [ 'ipcontroller_config.py',
                            'ipengine_config.py',
                            'ipcluster_config.py'
                        ]
        if new:
            for cf in parallel_files:
                self.config_files.append(cf)
        else:
            for cf in parallel_files:
                if cf in self.config_files:
                    self.config_files.remove(cf)

    def parse_command_line(self, argv):
        """Parse argv, accepting a positional argument as the profile name."""
        super(ProfileCreate, self).parse_command_line(argv)
        # accept positional arg as profile name
        if self.extra_args:
            self.profile = self.extra_args[0]

    flags = Dict(create_flags)

    classes = [ProfileDir]

    def _import_app(self, app_path):
        """import an app class"""
        app = None
        name = app_path.rsplit('.', 1)[-1]
        try:
            app = import_item(app_path)
        except ImportError:
            # Optional apps (qtconsole, notebook, ...) may not be installed.
            self.log.info("Couldn't import %s, config file will be excluded", name)
        except Exception:
            self.log.warn('Unexpected error importing %s', name, exc_info=True)
        return app

    def init_config_files(self):
        """Stage the config files of every importable IPython app into the profile."""
        super(ProfileCreate, self).init_config_files()
        # use local imports, since these classes may import from here
        from IPython.terminal.ipapp import TerminalIPythonApp
        apps = [TerminalIPythonApp]
        for app_path in (
            'IPython.qt.console.qtconsoleapp.IPythonQtConsoleApp',
            'IPython.html.notebookapp.NotebookApp',
            'IPython.nbconvert.nbconvertapp.NbConvertApp',
        ):
            app = self._import_app(app_path)
            if app is not None:
                apps.append(app)
        if self.parallel:
            from IPython.parallel.apps.ipcontrollerapp import IPControllerApp
            from IPython.parallel.apps.ipengineapp import IPEngineApp
            from IPython.parallel.apps.ipclusterapp import IPClusterStart
            from IPython.parallel.apps.iploggerapp import IPLoggerApp
            apps.extend([
                IPControllerApp,
                IPEngineApp,
                IPClusterStart,
                IPLoggerApp,
            ])
        # Instantiate each app just to have it write its default config file
        # into this profile's directory.
        for App in apps:
            app = App()
            app.config.update(self.config)
            app.log = self.log
            app.overwrite = self.overwrite
            app.copy_config_files=True
            app.ipython_dir=self.ipython_dir
            app.profile_dir=self.profile_dir
            app.init_config_files()

    def stage_default_config_file(self):
        # Overridden to a no-op: each sub-app stages its own config file above.
        pass
class ProfileApp(Application):
    """Umbrella app for `ipython profile`: dispatches to create/list/locate."""
    name = u'ipython-profile'
    description = profile_help
    examples = _main_examples

    # Map subcommand name -> (App class, one-line help taken from its description).
    subcommands = Dict(dict(
        create = (ProfileCreate, ProfileCreate.description.splitlines()[0]),
        list = (ProfileList, ProfileList.description.splitlines()[0]),
        locate = (ProfileLocate, ProfileLocate.description.splitlines()[0]),
    ))

    def start(self):
        """Run the selected subcommand, or print usage and exit(1) if none given."""
        if self.subapp is None:
            print("No subcommand specified. Must specify one of: %s"%(self.subcommands.keys()))
            print()
            self.print_description()
            self.print_subcommands()
            self.exit(1)
        else:
            return self.subapp.start()
| [
"andy@youshallthrive.com"
] | andy@youshallthrive.com |
31b1631b1523aadad273e28fadb8ad1c54978cc0 | 951400a855a6f4af8d9dedfd533e4a19f243f1c7 | /tree.py | 66022eeec58704396df2b56fba968f6e0af905ff | [] | no_license | twangad/test | b698939f4a0033505c1fd1e1a5c2e8757683cf0b | d73bdf1554a520a6892f873777e2226fa09ed151 | refs/heads/master | 2020-07-02T10:17:13.132626 | 2016-09-11T09:17:37 | 2016-09-11T09:17:37 | 67,919,979 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | ###
class test():
def __init__():
pass
def saysth():
print "here" | [
"twangad@connect.ust.hk"
] | twangad@connect.ust.hk |
7f12cf4f8c2a9dbbd0be88734b98d0c8b28eca87 | e9bc070d1d9257c4a213bc1f33ca6269bbc37b43 | /tests/roots/test-ext-autosummary/conf.py | f4d696cc912bb3108db71ca0fb841c3d904f7427 | [
"BSD-3-Clause",
"Python-2.0",
"LicenseRef-scancode-secret-labs-2011",
"MIT",
"BSD-2-Clause"
] | permissive | GoodRx/sphinx | 99b33454afa06cf6a66d080c3c4019cc7ddde2f0 | c310c73baffa4892cf35fd74918193824c86309a | refs/heads/1.6.x-py-type-xref | 2021-01-01T06:02:33.415993 | 2017-07-16T03:12:58 | 2017-07-16T03:12:58 | 97,339,105 | 1 | 1 | null | 2017-07-16T03:12:58 | 2017-07-15T19:57:45 | Python | UTF-8 | Python | false | false | 184 | py | import sys, os
sys.path.insert(0, os.path.abspath('.'))
extensions = ['sphinx.ext.autosummary']
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
| [
"i.tkomiya@gmail.com"
] | i.tkomiya@gmail.com |
1fad6fbeeeb619735e591e2a715bef13c07b1e3b | bc9f66258575dd5c8f36f5ad3d9dfdcb3670897d | /lib/googlecloudsdk/generated_clients/apis/gkehub/v1alpha1/gkehub_v1alpha1_client.py | 45d59b19c56748c72896a2a2c8b5b7fce532c530 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | google-cloud-sdk-unofficial/google-cloud-sdk | 05fbb473d629195f25887fc5bfaa712f2cbc0a24 | 392abf004b16203030e6efd2f0af24db7c8d669e | refs/heads/master | 2023-08-31T05:40:41.317697 | 2023-08-23T18:23:16 | 2023-08-23T18:23:16 | 335,182,594 | 9 | 2 | NOASSERTION | 2022-10-29T20:49:13 | 2021-02-02T05:47:30 | Python | UTF-8 | Python | false | false | 20,475 | py | """Generated client library for gkehub version v1alpha1."""
# NOTE: This file is autogenerated and should not be edited by hand.
from __future__ import absolute_import
from apitools.base.py import base_api
from googlecloudsdk.generated_clients.apis.gkehub.v1alpha1 import gkehub_v1alpha1_messages as messages
class GkehubV1alpha1(base_api.BaseApiClient):
"""Generated client library for service gkehub version v1alpha1."""
MESSAGES_MODULE = messages
BASE_URL = 'https://gkehub.googleapis.com/'
MTLS_BASE_URL = 'https://gkehub.mtls.googleapis.com/'
_PACKAGE = 'gkehub'
_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
_VERSION = 'v1alpha1'
_CLIENT_ID = 'CLIENT_ID'
_CLIENT_SECRET = 'CLIENT_SECRET'
_USER_AGENT = 'google-cloud-sdk'
_CLIENT_CLASS_NAME = 'GkehubV1alpha1'
_URL_VERSION = 'v1alpha1'
_API_KEY = None
def __init__(self, url='', credentials=None,
get_credentials=True, http=None, model=None,
log_request=False, log_response=False,
credentials_args=None, default_global_params=None,
additional_http_headers=None, response_encoding=None):
"""Create a new gkehub handle."""
url = url or self.BASE_URL
super(GkehubV1alpha1, self).__init__(
url, credentials=credentials,
get_credentials=get_credentials, http=http, model=model,
log_request=log_request, log_response=log_response,
credentials_args=credentials_args,
default_global_params=default_global_params,
additional_http_headers=additional_http_headers,
response_encoding=response_encoding)
self.projects_locations_features = self.ProjectsLocationsFeaturesService(self)
self.projects_locations_global_features = self.ProjectsLocationsGlobalFeaturesService(self)
self.projects_locations_global = self.ProjectsLocationsGlobalService(self)
self.projects_locations_operations = self.ProjectsLocationsOperationsService(self)
self.projects_locations = self.ProjectsLocationsService(self)
self.projects = self.ProjectsService(self)
class ProjectsLocationsFeaturesService(base_api.BaseApiService):
"""Service class for the projects_locations_features resource."""
_NAME = 'projects_locations_features'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsFeaturesService, self).__init__(client)
self._upload_configs = {
}
def GetIamPolicy(self, request, global_params=None):
r"""Gets the access control policy for a resource. Returns an empty policy if the resource exists and does not have a policy set.
Args:
request: (GkehubProjectsLocationsFeaturesGetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('GetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
GetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/features/{featuresId}:getIamPolicy',
http_method='GET',
method_id='gkehub.projects.locations.features.getIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=['options_requestedPolicyVersion'],
relative_path='v1alpha1/{+resource}:getIamPolicy',
request_field='',
request_type_name='GkehubProjectsLocationsFeaturesGetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def SetIamPolicy(self, request, global_params=None):
r"""Sets the access control policy on the specified resource. Replaces any existing policy. Can return `NOT_FOUND`, `INVALID_ARGUMENT`, and `PERMISSION_DENIED` errors.
Args:
request: (GkehubProjectsLocationsFeaturesSetIamPolicyRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Policy) The response message.
"""
config = self.GetMethodConfig('SetIamPolicy')
return self._RunMethod(
config, request, global_params=global_params)
SetIamPolicy.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/features/{featuresId}:setIamPolicy',
http_method='POST',
method_id='gkehub.projects.locations.features.setIamPolicy',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:setIamPolicy',
request_field='setIamPolicyRequest',
request_type_name='GkehubProjectsLocationsFeaturesSetIamPolicyRequest',
response_type_name='Policy',
supports_download=False,
)
def TestIamPermissions(self, request, global_params=None):
r"""Returns permissions that a caller has on the specified resource. If the resource does not exist, this will return an empty set of permissions, not a `NOT_FOUND` error. Note: This operation is designed to be used for building permission-aware UIs and command-line tools, not for authorization checking. This operation may "fail open" without warning.
Args:
request: (GkehubProjectsLocationsFeaturesTestIamPermissionsRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(TestIamPermissionsResponse) The response message.
"""
config = self.GetMethodConfig('TestIamPermissions')
return self._RunMethod(
config, request, global_params=global_params)
TestIamPermissions.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/features/{featuresId}:testIamPermissions',
http_method='POST',
method_id='gkehub.projects.locations.features.testIamPermissions',
ordered_params=['resource'],
path_params=['resource'],
query_params=[],
relative_path='v1alpha1/{+resource}:testIamPermissions',
request_field='testIamPermissionsRequest',
request_type_name='GkehubProjectsLocationsFeaturesTestIamPermissionsRequest',
response_type_name='TestIamPermissionsResponse',
supports_download=False,
)
class ProjectsLocationsGlobalFeaturesService(base_api.BaseApiService):
"""Service class for the projects_locations_global_features resource."""
_NAME = 'projects_locations_global_features'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsGlobalFeaturesService, self).__init__(client)
self._upload_configs = {
}
def Create(self, request, global_params=None):
r"""Adds a new Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesCreateRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Create')
return self._RunMethod(
config, request, global_params=global_params)
Create.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features',
http_method='POST',
method_id='gkehub.projects.locations.global.features.create',
ordered_params=['parent'],
path_params=['parent'],
query_params=['featureId'],
relative_path='v1alpha1/{+parent}/features',
request_field='feature',
request_type_name='GkehubProjectsLocationsGlobalFeaturesCreateRequest',
response_type_name='Operation',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Removes a Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features/{featuresId}',
http_method='DELETE',
method_id='gkehub.projects.locations.global.features.delete',
ordered_params=['name'],
path_params=['name'],
query_params=['force'],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsGlobalFeaturesDeleteRequest',
response_type_name='Operation',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets details of a single Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Feature) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features/{featuresId}',
http_method='GET',
method_id='gkehub.projects.locations.global.features.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsGlobalFeaturesGetRequest',
response_type_name='Feature',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists Features in a given project and location.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListFeaturesResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features',
http_method='GET',
method_id='gkehub.projects.locations.global.features.list',
ordered_params=['parent'],
path_params=['parent'],
query_params=['filter', 'orderBy', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+parent}/features',
request_field='',
request_type_name='GkehubProjectsLocationsGlobalFeaturesListRequest',
response_type_name='ListFeaturesResponse',
supports_download=False,
)
def Patch(self, request, global_params=None):
r"""Updates an existing Feature.
Args:
request: (GkehubProjectsLocationsGlobalFeaturesPatchRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Patch')
return self._RunMethod(
config, request, global_params=global_params)
Patch.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/global/features/{featuresId}',
http_method='PATCH',
method_id='gkehub.projects.locations.global.features.patch',
ordered_params=['name'],
path_params=['name'],
query_params=['updateMask'],
relative_path='v1alpha1/{+name}',
request_field='feature',
request_type_name='GkehubProjectsLocationsGlobalFeaturesPatchRequest',
response_type_name='Operation',
supports_download=False,
)
class ProjectsLocationsGlobalService(base_api.BaseApiService):
"""Service class for the projects_locations_global resource."""
_NAME = 'projects_locations_global'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsGlobalService, self).__init__(client)
self._upload_configs = {
}
class ProjectsLocationsOperationsService(base_api.BaseApiService):
"""Service class for the projects_locations_operations resource."""
_NAME = 'projects_locations_operations'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsOperationsService, self).__init__(client)
self._upload_configs = {
}
def Cancel(self, request, global_params=None):
r"""Starts asynchronous cancellation on a long-running operation. The server makes a best effort to cancel the operation, but success is not guaranteed. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`. Clients can use Operations.GetOperation or other methods to check whether the cancellation succeeded or whether the operation completed despite cancellation. On successful cancellation, the operation is not deleted; instead, it becomes an operation with an Operation.error value with a google.rpc.Status.code of 1, corresponding to `Code.CANCELLED`.
Args:
request: (GkehubProjectsLocationsOperationsCancelRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Cancel')
return self._RunMethod(
config, request, global_params=global_params)
Cancel.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}:cancel',
http_method='POST',
method_id='gkehub.projects.locations.operations.cancel',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}:cancel',
request_field='cancelOperationRequest',
request_type_name='GkehubProjectsLocationsOperationsCancelRequest',
response_type_name='Empty',
supports_download=False,
)
def Delete(self, request, global_params=None):
r"""Deletes a long-running operation. This method indicates that the client is no longer interested in the operation result. It does not cancel the operation. If the server doesn't support this method, it returns `google.rpc.Code.UNIMPLEMENTED`.
Args:
request: (GkehubProjectsLocationsOperationsDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
"""
config = self.GetMethodConfig('Delete')
return self._RunMethod(
config, request, global_params=global_params)
Delete.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='DELETE',
method_id='gkehub.projects.locations.operations.delete',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsOperationsDeleteRequest',
response_type_name='Empty',
supports_download=False,
)
def Get(self, request, global_params=None):
r"""Gets the latest state of a long-running operation. Clients can use this method to poll the operation result at intervals as recommended by the API service.
Args:
request: (GkehubProjectsLocationsOperationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Operation) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations/{operationsId}',
http_method='GET',
method_id='gkehub.projects.locations.operations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsOperationsGetRequest',
response_type_name='Operation',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists operations that match the specified filter in the request. If the server doesn't support this method, it returns `UNIMPLEMENTED`.
Args:
request: (GkehubProjectsLocationsOperationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListOperationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}/operations',
http_method='GET',
method_id='gkehub.projects.locations.operations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+name}/operations',
request_field='',
request_type_name='GkehubProjectsLocationsOperationsListRequest',
response_type_name='ListOperationsResponse',
supports_download=False,
)
class ProjectsLocationsService(base_api.BaseApiService):
"""Service class for the projects_locations resource."""
_NAME = 'projects_locations'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsLocationsService, self).__init__(client)
self._upload_configs = {
}
def Get(self, request, global_params=None):
r"""Gets information about a location.
Args:
request: (GkehubProjectsLocationsGetRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Location) The response message.
"""
config = self.GetMethodConfig('Get')
return self._RunMethod(
config, request, global_params=global_params)
Get.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations/{locationsId}',
http_method='GET',
method_id='gkehub.projects.locations.get',
ordered_params=['name'],
path_params=['name'],
query_params=[],
relative_path='v1alpha1/{+name}',
request_field='',
request_type_name='GkehubProjectsLocationsGetRequest',
response_type_name='Location',
supports_download=False,
)
def List(self, request, global_params=None):
r"""Lists information about the supported locations for this service.
Args:
request: (GkehubProjectsLocationsListRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(ListLocationsResponse) The response message.
"""
config = self.GetMethodConfig('List')
return self._RunMethod(
config, request, global_params=global_params)
List.method_config = lambda: base_api.ApiMethodInfo(
flat_path='v1alpha1/projects/{projectsId}/locations',
http_method='GET',
method_id='gkehub.projects.locations.list',
ordered_params=['name'],
path_params=['name'],
query_params=['filter', 'includeUnrevealedLocations', 'pageSize', 'pageToken'],
relative_path='v1alpha1/{+name}/locations',
request_field='',
request_type_name='GkehubProjectsLocationsListRequest',
response_type_name='ListLocationsResponse',
supports_download=False,
)
class ProjectsService(base_api.BaseApiService):
"""Service class for the projects resource."""
_NAME = 'projects'
def __init__(self, client):
super(GkehubV1alpha1.ProjectsService, self).__init__(client)
self._upload_configs = {
}
| [
"cloudsdk.mirror@gmail.com"
] | cloudsdk.mirror@gmail.com |
c1b3876aae1a898188d4da189bd9db75e5afc8c6 | 41249d7d4ca9950b9c6fee89bf7e2c1929629767 | /results/lz_optimizations_20200507/script_lz_crab4freq_powell_bound10_constantFreqAndInitAmps_tf0-1.py | d14345a8c9437a041da7e650381b2b1114829de0 | [
"MIT"
] | permissive | lucainnocenti/ultrafast-critical-ground-state-preparation-2007.07381 | f739b3baad1d2aadda576303bb0bbe9d48ec204a | 29f80dcf914096555cee9bc2e18249a2c95d6a50 | refs/heads/master | 2022-11-22T00:44:09.998199 | 2020-07-21T08:35:28 | 2020-07-21T08:35:28 | 281,237,037 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,458 | py | import os
import sys
import numpy as np
import pandas as pd
import logging
if '../../' not in sys.path:
sys.path.append('../../')
import src.optimization as optimization
import src.protocol_ansatz as protocol_ansatz
from src.utils import autonumber_filename, basic_logger_configuration
output_file_name = os.path.basename(__file__)[7:-3] + '.csv'
output_file_name = autonumber_filename(output_file_name)
basic_logger_configuration(filename=output_file_name[:-3] + 'log')
logging.info('Output file name will be "{}"'.format(output_file_name))
# ------ start optimization
num_frequencies = 4
protocol = protocol_ansatz.CRABProtocolAnsatz(num_frequencies=num_frequencies)
protocol.generate_rnd_frequencies_each_tf = False
for idx in range(num_frequencies):
protocol.hyperpars['nuk' + str(idx + 1)] = 0
protocol.fill_hyperpar_value(y0=-5, y1=0)
results = optimization.find_best_protocol(
problem_specification=dict(
model='lz',
model_parameters=dict(omega_0=1),
task=dict(initial_intensity=-5, final_intensity=0)
),
optimization_specs=dict(
protocol=protocol,
protocol_options=dict(num_frequencies=num_frequencies),
optimization_method='powell',
parameters_constraints=[-10, 10],
initial_parameters=[0] * (2 * num_frequencies)
),
other_options=dict(
scan_times=np.linspace(0.01, 1, 200)
)
)
# ------ save results to file
results.to_csv(output_file_name)
| [
"lukeinnocenti@gmail.com"
] | lukeinnocenti@gmail.com |
a435a2a71d4aaf93551b2f8952ededd0cb812d28 | 51ae004ddefa3e6c942e18348b4b14d95e8fdf0e | /src/__main__.py | 3d2b2baedc1de9302d48136b20547f5e2a8c2687 | [] | no_license | ewascent/python_file_sample | 10c6209b39f8c40605e7152b199f1cf6c5dcd0a1 | e0f1adc4be2e3b71aa583629cfa9b3748ca669c4 | refs/heads/master | 2020-03-30T22:49:18.364585 | 2018-10-08T07:08:50 | 2018-10-08T07:08:50 | 151,679,053 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,327 | py | """main is main"""
import sys
from error import InsufficientArguments
from error import ArgumentTypeException
import filer
from utilities import setup_logging
__author__ = "ewascent"
__copyright__ = "ewascent"
__license__ = "mit"
def main(_args=None):
"""enter the dragon, is what I imagine the main method saying"""
try:
_logger = setup_logging('info')
if _args is None:
_args = sys.argv
files = _args
result_count = 100
for file in files:
if "__main__.py" not in file:
_logger.info(f"Recieved path to file: {file}")
results = filer.outputter(some_collection=filer.reader(file),
this_many_results=result_count)
print(f'Top {result_count} matches for file {file}')
for result in results:
print(result)
except InsufficientArguments:
_logger.error("Recieved no file input")
raise
except ArgumentTypeException:
_logger.error("Not a valid file path")
raise
except:
_logger.error("Unexpected error: %s", sys.exc_info()[0])
print("Unexpected error:", sys.exc_info()[0])
raise
if __name__ == "__main__":
main(sys.argv)
| [
"ewascent@gmail.com"
] | ewascent@gmail.com |
2578305f74225a3ce266d7e62e3ee9cd11303766 | 7ff9de453f53b658d13896bb0c376d67489145a7 | /python_basics/datatypes/strings.py | 67ce0ec20bd3b0a951eaa007570a45e52eb2a7a7 | [
"MIT"
] | permissive | danielkpodo/python-zero-to-mastery | aa4851fd0bfe1f0bfa4191fa141fa9551fd7c7a9 | d39468f48211bc82e4e2613745d9107d433e05af | refs/heads/master | 2022-11-21T10:55:54.776490 | 2020-07-19T15:07:35 | 2020-07-19T15:07:35 | 275,909,257 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | username = "narh"
last_name = "kpodo"
| [
"kpododanielnarh@gmail.com"
] | kpododanielnarh@gmail.com |
06b23e46c8862a2a3ab779c2dfd4b094a8b55540 | 041b8daad5f4c72ae81a9706a2a3e5f56a36995f | /Python OOP/innerclass.py | 82fe749eec290ecd675c705d600c1f4ba2bab67b | [] | no_license | hashansl/dash-plotly-training | 7100e1fa55fb489d2713b68bc0ff9e8f0ecef18b | 987d845f476a6501c1c8673e2423d1c258fb4dbe | refs/heads/main | 2023-06-03T09:38:33.363588 | 2021-06-02T03:38:42 | 2021-06-02T03:38:42 | 371,782,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 729 | py | #6
# you can create object of inner class inside the outer class
#OR
# you can create object of inner class outside the outer class provided you use outer class name to call it
class Student:
#Outer class
def __init__(self,name,rollno):
self.name=name
self.rollno=rollno
self.lap = self.Laptop()
def show(self):
print(self.name, self.rollno)
self.lap.show()
class Laptop:
#inner Class
def __init__(self):
self.brand = 'HP'
self.cpu = 'i5'
self.ram = 8
def show(self):
print(self.brand,self.cpu,self.ram)
s1 = Student('Hashan',2)
s2 = Student('Dananjaya',3)
s1.show()
lap1 = Student.Laptop()
| [
"hashan.dan@gmail.com"
] | hashan.dan@gmail.com |
8d416110ae94969cba5ebcae29b1d1e4b9bf6b17 | a0d06a661fd760b57e671582189f1fb1cbee87f0 | /src/chat_take/web/handlers.py | 5f21c90045824bb3d64b2d7b5a245d9832c8d143 | [] | no_license | Tsvetov/chat | 6def42a334a044806b9a483d07edc05662a44438 | 8f1b064c29acdc3f17ad8d0a228a40552cdf4130 | refs/heads/master | 2021-08-26T07:25:38.898642 | 2017-11-22T06:30:59 | 2017-11-22T06:30:59 | 111,576,973 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 410 | py | import logging
from redis import StrictRedis
from tornado import web, websocket, escape
r = StrictRedis(db=1)
logger = logging.getLogger('handlers')
class PingHandler(web.RequestHandler):
def get(self):
self.write('ok') # pylint: disable=no-member
class LogoutHandler(web.RequestHandler):
@web.authenticated
def get(self):
self.clear_cookie('user')
self.redirect('/') | [
"ptsvetov@MacBook-Pro-Admin-33.local"
] | ptsvetov@MacBook-Pro-Admin-33.local |
2c190be799017c52cc5a83639396080f5ef20ae9 | 82c54cab8e0c5b73e1fdb9615296613cc43929a0 | /authentication/forms.py | d3f7b622935250beef47f85ac1ec6f9ee9435405 | [] | no_license | creechcorbin/twitter_clone | e4146657bd13043544f846c48b34fe83e90e91da | bd075bd53fd9e5558cda85ade86ed9995f72118c | refs/heads/master | 2022-12-10T09:23:37.036180 | 2020-09-05T03:23:32 | 2020-09-05T03:23:32 | 292,993,852 | 0 | 0 | null | 2020-09-09T01:08:27 | 2020-09-05T03:22:43 | Python | UTF-8 | Python | false | false | 345 | py | from django import forms
class LoginForm(forms.Form):
username = forms.CharField(max_length=80)
password = forms.CharField(widget=forms.PasswordInput)
class SignupForm(forms.Form):
username = forms.CharField(max_length=80)
displayname = forms.CharField(max_length=80)
password = forms.CharField(widget=forms.PasswordInput)
| [
"creechcorbin@gmail.com"
] | creechcorbin@gmail.com |
6770119ff8804eda04aeeb3fd19760c08c6849a5 | 2a37885d0b4cd6e5938e6d564f189a9ae7ade21f | /day8.py | 862379ce3c86b437a52f5d727431add939afce58 | [] | no_license | wilsonconley/advent-of-code-2020 | 189df901ba14212bd2f27c055be3feb99d17d4bb | 74e7903d8c6e8abc91f1ce72b5984d93cf828ff1 | refs/heads/master | 2023-02-05T23:06:50.935046 | 2020-12-29T01:32:22 | 2020-12-29T01:32:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,547 | py | #!/usr/local/bin/python3
import os
import re
import string
import numpy as np
import copy
def read_file():
filename = os.path.join(os.path.dirname(os.path.abspath(__file__)), "Inputs", os.path.basename(__file__).replace("py","txt"))
print("Loading File:")
print(filename)
data = list()
f = open(filename)
for x in f:
data.append(x.replace("\n",""))
f.close()
return data
def run_scenario(data):
# print("run scenario on: ")
# print(data)
valid = True
run = np.zeros(len(data))
instruction = 0
while (instruction < len(data) and run[instruction] == 0):
# print("\tLine " + str(instruction))
run[instruction] = 1
x = data[instruction]
if x[0:3] == "acc":
instruction += 1
elif x[0:3] == "jmp":
instruction += int(x[4:])
else:
instruction += 1
# print("\tNew instrction = " + str(instruction))
if instruction < len(data) and run[instruction] == 1:
# print("run[instruction] = " + str(run[instruction]))
valid = False
return valid
if __name__ == "__main__":
data = read_file()
# part 1
run = np.zeros(len(data))
count = 0
instruction = 0
while (run[instruction] == 0):
run[instruction] = 1
x = data[instruction]
if x[0:3] == "acc":
# print("adding: " + str(int(x[4:])))
count += int(x[4:])
instruction += 1
elif x[0:3] == "jmp":
instruction += int(x[4:])
else:
instruction += 1
print("count = " + str(count))
# part 2
fixed = False
count = 0
instruction = 0
run = np.zeros(len(data))
while instruction < len(data):
print("Line " + str(instruction))
if run[instruction] == 1:
print(data[instruction] + "already run")
break
run[instruction] = 1
x = data[instruction]
if x[0:3] == "acc":
count += int(x[4:])
instruction += 1
elif x[0:3] == "jmp":
if not fixed:
tmp = copy.deepcopy(data)
tmp[instruction] = tmp[instruction].replace("jmp", "nop")
if run_scenario(tmp):
print("changing line " + str(instruction) + " from " + data[instruction] + " to " + tmp[instruction])
data = tmp
fixed = True
instruction += 1
else:
instruction += int(x[4:])
else:
instruction += int(x[4:])
else:
if not fixed:
tmp = copy.deepcopy(data)
tmp[instruction] = tmp[instruction].replace("nop", "jmp")
if run_scenario(tmp):
print("changing line " + str(instruction) + " from " + data[instruction] + " to " + tmp[instruction])
data = tmp
fixed = True
instruction += int(x[4:])
else:
instruction += 1
else:
instruction += 1
print("count = " + str(count)) | [
"wilsonconley@Wilsons-MacBook-Pro.local"
] | wilsonconley@Wilsons-MacBook-Pro.local |
c58f0a9f11b329810abbf4a905261ab209363ccb | efdab571b6273bbbcaddc49e1ca4978aa0625fa9 | /Fundamentals/Session1/Homework/converts.py | f8f1a8db4d349adddafe11fcc6a1610cac46df64 | [] | no_license | duongnt52/ngotungduong-fundamentals-c4e25 | 2f238b6a1bc7eeaf96617aca9e32cef28c1fbeb7 | 2f412cd370bbca4f826466e09d8f30de60b9c648 | refs/heads/master | 2020-04-13T07:49:04.366197 | 2019-02-27T12:50:28 | 2019-02-27T12:50:28 | 163,063,292 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | c = int(input("Enter the temperature in Celsirus? "))
f = (c * 1.8) + 32
print(c, "(C)", "=", f, "(F)") | [
"duongnt52"
] | duongnt52 |
ced0baa0e9192cab080e7e0c0c749c9c7e56e9a1 | 1da91735d1a4d19e62b2d19826d9a1e85d88d690 | /dxpy/dxpy/task/model/tests/test_task.py | 32e1f9139b28e9e0836aef2a1a5c31a6253ebbf0 | [] | no_license | Hong-Xiang/dxl | 94229e4c20f0c97dfe21f8563889c991330df9c3 | 29aed778d1c699cc57d09666a20b4ca60196392f | refs/heads/master | 2021-01-02T22:49:20.298893 | 2018-05-22T13:42:20 | 2018-05-22T13:42:20 | 99,401,725 | 1 | 1 | null | 2018-05-22T13:42:21 | 2017-08-05T05:34:35 | Python | UTF-8 | Python | false | false | 3,063 | py | import json
import unittest
from dxpy.task.model import task
from dxpy.time.timestamps import TaskStamp
from dxpy.time.utils import strp
class TestTask(unittest.TestCase):
def test_to_json(self):
t = task.Task(tid=10, desc='test', workdir='/tmp/test',
worker=task.Worker.MultiThreading,
ttype=task.Type.Regular,
dependency=[1, 2, 3],
time_stamp=TaskStamp(create=strp(
"2017-09-22 12:57:44.036185")),
data={'sample': 42},
is_root=True)
s = t.to_json()
dct = json.loads(s)
self.assertEqual(dct['id'], 10)
self.assertEqual(dct['desc'], 'test')
self.assertEqual(dct['dependency'], [1, 2, 3])
self.assertEqual(dct['data'], {'sample': 42})
self.assertEqual(dct['type'], 'Regular')
self.assertEqual(dct['workdir'], '/tmp/test')
self.assertEqual(dct['worker'], 'MultiThreading')
self.assertEqual(dct['is_root'], True)
self.assertEqual(dct['time_stamp'], {
'create': "2017-09-22 12:57:44.036185", 'start': None, 'end': None})
self.assertEqual(dct['state'], 'BeforeSubmit')
def test_from_json(self):
dct = {
'__task__': True,
'id': 10,
'desc': 'test',
'workdir': '/tmp/test',
'worker': 'Slurm',
'type': 'Script',
'dependency': [1, 2, 3],
'data': {'sample': 42},
'is_root': True,
'time_stamp': {
'create': "2017-09-22 12:57:44.036185",
'start': None,
'end': None
},
'state': 'BeforeSubmit'
}
t = task.Task.from_json(json.dumps(dct))
self.assertEqual(t.id, 10)
self.assertEqual(t.desc, 'test')
self.assertEqual(t.workdir, '/tmp/test')
self.assertEqual(t.worker, task.Worker.Slurm)
self.assertEqual(t.type, task.Type.Script)
self.assertEqual(t.dependency, [1, 2, 3])
self.assertEqual(t.data, {'sample': 42})
self.assertEqual(t.is_root, True)
self.assertEqual(t.time_stamp.create, strp(
"2017-09-22 12:57:44.036185"))
self.assertEqual(t.state, task.State.BeforeSubmit)
def test_submit(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.submit(t)
self.assertEqual(t.state, task.State.Pending)
def test_start(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.start(t)
self.assertEqual(t.state, task.State.Runing)
def test_complete(self):
t = task.Task(10, 'test', state=task.State.BeforeSubmit)
self.assertEqual(t.state, task.State.BeforeSubmit)
t = task.complete(t)
self.assertEqual(t.state, task.State.Complete)
| [
"hx.hongxiang@gmail.com"
] | hx.hongxiang@gmail.com |
973be0558ece5d4ee643158fbb3ac967a41dac12 | a87294fad6d80d0cdae0d2871626acce67442115 | /TenderPost/apps.py | 9872dd730bd48f2a5d384ff116d6682574cbdb42 | [] | no_license | Sadat-Shahriyar/Amphitetris | a9f41b2880770f074348c49ff29ce444c4f0b10f | f3cb1cc370bce6f1a61ac9f6e70deb3710967da2 | refs/heads/master | 2022-12-23T23:37:52.837438 | 2020-09-27T16:40:00 | 2020-09-27T16:40:00 | 471,795,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | from django.apps import AppConfig
class TenderpostConfig(AppConfig):
name = 'TenderPost'
| [
"jayantasadhu4557@gmail.com"
] | jayantasadhu4557@gmail.com |
322904d370ccd9b2b0310c2f06e700406ee35483 | 3e201c6c7ac24b425c8e2f73b4f23e5681b0b6a7 | /My Phrases/new-tab.py | 00f3ef6e9823e426f26955afee51a8e72bf64922 | [
"MIT"
] | permissive | yasapurnama/autokey-osx-ify | 5e332b6110928710f3230d2df1816d5ea59e65a1 | a690c5892ff8124905cbf11399ab183d7804bc13 | refs/heads/master | 2020-12-20T14:19:34.697588 | 2020-02-28T13:44:37 | 2020-02-28T13:44:37 | 236,105,369 | 0 | 0 | MIT | 2020-01-25T00:15:27 | 2020-01-25T00:15:26 | null | UTF-8 | Python | false | false | 190 | py | import re
keys = "<ctrl>+t"
window = window.get_active_class()
is_terminal = re.search('term', window, re.IGNORECASE)
if is_terminal:
keys = "<shift>+" + keys
keyboard.send_keys(keys)
| [
"me@glenn-roberts.com"
] | me@glenn-roberts.com |
3317acbb6b1c8517bfce38ae6b51df5e4c04a897 | ed8126f7a19e4ed71a2a0c3b28f59e9a2787cf47 | /tests/lagrange.py | 4e4620af1e0eff87ca331dea9c6f8cde2753fb09 | [
"Apache-2.0"
] | permissive | acrovato/dg-flo | ccdb5dba706bfbfae76af98fb8aeb96d6a110061 | 759263f80c92984b2c1dada11a09e17235b529ce | refs/heads/main | 2023-02-22T09:29:57.677025 | 2021-01-17T19:02:01 | 2021-01-17T19:02:01 | 313,250,255 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,922 | py | # -*- coding: utf8 -*-
# test encoding: à-é-è-ô-ï-€
# Copyright 2021 Adrien Crovato
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Lagrange shape functions test
# Adrien Crovato
#
# Test the Lagrange shape functions for order p (n = p+1)
import numpy as np
import fe.quadrature as quad
import fe.shapes as shp
import utils.testing as tst
from run import parse
def main():
# Create evaluation and interpolation points
p = 4 # order
x = np.linspace(-1,1,100)
xi = quad.GaussLegendreLobatto(p).x
# Create shape functions
shape = shp.Lagrange(x, xi)
print(shape)
# Store and plot
if parse().gui:
import matplotlib.pyplot as plt
l = np.zeros((shape.n, len(x)))
dl = np.zeros((shape.n, len(x)))
for k in range(len(x)):
l[:, k] = np.transpose(shape.sf[k])
dl[:, k] = shape.dsf[k]
plt.figure(1)
for i in range(shape.n):
plt.plot(x, l[i, :])
plt.plot(xi[i], 0, 'ko')
plt.xlabel('x')
plt.ylabel('N_i')
plt.title('Shape functions of order {:d}'.format(p))
plt.figure(2)
for i in range(shape.n):
plt.plot(x, dl[i, :])
plt.plot(xi[i], 0, 'ko')
plt.xlabel('x')
plt.ylabel('dN_i/dx')
plt.title('Shape function derivatives of order {:d}'.format(p))
plt.show()
if __name__=="__main__":
main()
| [
"39187559+acrovato@users.noreply.github.com"
] | 39187559+acrovato@users.noreply.github.com |
eee180705f38d0e11b8a5778069d77230bafec5f | 481452cd3b904af7a42bbeb71190a59c29e4775b | /python_batch_4/class2/typecasting2.py | deb323944f44ee751f0fd3988dc54191fb1697f1 | [] | no_license | rahusriv/python_tutorial | b09b54044f9df86ac603634ac1dd8d4ea6705e4a | 7de9b62a8e1e8ca1df5f2679ebf17d655f6b1b8e | refs/heads/master | 2020-03-28T11:24:16.468977 | 2019-05-12T06:51:32 | 2019-05-12T06:51:32 | 148,209,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83 | py | a = "20.99"
b = "30.89"
c = int(float(a)) +int(float(b))
print(type(c))
print(c) | [
"rahusr@gmail.com"
] | rahusr@gmail.com |
9479f066756090388c2092129ef0059b3ebf32ea | cf14b6ee602bff94d3fc2d7e712b06458540eed7 | /gs105/gs105/settings.py | 422043aee64a923a3033927c1f8cb6ac0230c445 | [] | no_license | ManishShah120/Learning-Django | 8b0d7bfe7e7c13dcb71bb3d0dcdf3ebe7c36db27 | 8fe70723d18884e103359c745fb0de5498b8d594 | refs/heads/master | 2023-03-29T09:49:47.694123 | 2021-03-28T16:04:34 | 2021-03-28T16:04:34 | 328,925,596 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,073 | py | """
Django settings for gs105 project.
Generated by 'django-admin startproject' using Django 3.1.5.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ytgu6b45d)u!-fh@a_v#1d*#010=aih7p8o5juvr(v$ubumwn='
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'school',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gs105.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gs105.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"mkshah141@gmail.com"
] | mkshah141@gmail.com |
2f62066c180ecaec7d3c36b4eb514313cea1f73a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03605/s666410011.py | ee1b3b217136d88dfc453fa50b5f4c38f78ab5b2 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | N=input()
if N.count("9"):
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9547a9fc3daa754897d982f560d79f01d3208f12 | 51fc1cdba445ff736911e21e0c8a7fecb252dd67 | /generate_training_data.py | b59fed61337e20b0724a6bebc25975eac5c98df8 | [] | no_license | ZhengPeng0115/MTGNN | bda54d946581fb0c400e94d0a67045e70d37fb75 | b5558528e9840c50d177d6175ca1214d1bc9886e | refs/heads/master | 2022-12-31T18:35:41.981755 | 2020-10-15T15:50:54 | 2020-10-15T15:50:54 | 304,375,049 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,911 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import os
import pandas as pd
def generate_graph_seq2seq_io_data(
df, x_offsets, y_offsets, add_time_in_day=True, add_day_in_week=False, scaler=None
):
"""
Generate samples from
:param df:
:param x_offsets:
:param y_offsets:
:param add_time_in_day:
:param add_day_in_week:
:param scaler:
:return:
# x: (epoch_size, input_length, num_nodes, input_dim)
# y: (epoch_size, output_length, num_nodes, output_dim)
"""
num_samples, num_nodes = df.shape
data = np.expand_dims(df.values, axis=-1)
data_list = [data]
if add_time_in_day:
time_ind = (df.index.values - df.index.values.astype("datetime64[D]")) / np.timedelta64(1, "D")
time_in_day = np.tile(time_ind, [1, num_nodes, 1]).transpose((2, 1, 0))
data_list.append(time_in_day)
if add_day_in_week:
day_in_week = np.zeros(shape=(num_samples, num_nodes, 7))
day_in_week[np.arange(num_samples), :, df.index.dayofweek] = 1
data_list.append(day_in_week)
data = np.concatenate(data_list, axis=-1)
# epoch_len = num_samples + min(x_offsets) - max(y_offsets)
x, y = [], []
# t is the index of the last observation.
min_t = abs(min(x_offsets))
max_t = abs(num_samples - abs(max(y_offsets))) # Exclusive
for t in range(min_t, max_t):
x_t = data[t + x_offsets, ...]
y_t = data[t + y_offsets, ...]
x.append(x_t)
y.append(y_t)
x = np.stack(x, axis=0)
y = np.stack(y, axis=0)
return x, y
def generate_train_val_test(args):
df = pd.read_hdf(args.traffic_df_filename)
# 0 is the latest observed sample.
x_offsets = np.sort(
# np.concatenate(([-week_size + 1, -day_size + 1], np.arange(-11, 1, 1)))
np.concatenate((np.arange(-11, 1, 1),))
)
# Predict the next one hour
y_offsets = np.sort(np.arange(1, 13, 1))
# x: (num_samples, input_length, num_nodes, input_dim)
# y: (num_samples, output_length, num_nodes, output_dim)
x, y = generate_graph_seq2seq_io_data(
df,
x_offsets=x_offsets,
y_offsets=y_offsets,
add_time_in_day=True,
add_day_in_week=False,
)
print("x shape: ", x.shape, ", y shape: ", y.shape)
# Write the data into npz file.
# num_test = 6831, using the last 6831 examples as testing.
# for the rest: 7/8 is used for training, and 1/8 is used for validation.
num_samples = x.shape[0]
num_test = round(num_samples * 0.2)
num_train = round(num_samples * 0.7)
num_val = num_samples - num_test - num_train
# train
x_train, y_train = x[:num_train], y[:num_train]
# val
x_val, y_val = (
x[num_train: num_train + num_val],
y[num_train: num_train + num_val],
)
# test
x_test, y_test = x[-num_test:], y[-num_test:]
for cat in ["train", "val", "test"]:
_x, _y = locals()["x_" + cat], locals()["y_" + cat]
print(cat, "x: ", _x.shape, "y:", _y.shape)
np.savez_compressed(
os.path.join(args.output_dir, "%s.npz" % cat),
x=_x,
y=_y,
x_offsets=x_offsets.reshape(list(x_offsets.shape) + [1]),
y_offsets=y_offsets.reshape(list(y_offsets.shape) + [1]),
)
def main(args):
print("Generating training data")
generate_train_val_test(args)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_dir", type=str, default="data/METR-LA", help="Output directory."
)
parser.add_argument(
"--traffic_df_filename",
type=str,
default="data/metr-la.h5",
help="Raw traffic readings.",
)
args = parser.parse_args()
main(args)
| [
"zpengsdu@gmail.com"
] | zpengsdu@gmail.com |
aa6af7048c44cea9653dd669212be652afc07c82 | 960b3a17a4011264a001304e64bfb76d669b8ac5 | /mstrio/api/authentication.py | ee18f3ec2d1622d62b49c9697d82696d49d54468 | [
"Apache-2.0"
] | permissive | MicroStrategy/mstrio-py | 012d55df782a56dab3a32e0217b9cbfd0b59b8dd | c6cea33b15bcd876ded4de25138b3f5e5165cd6d | refs/heads/master | 2023-08-08T17:12:07.714614 | 2023-08-03T12:30:11 | 2023-08-03T12:30:11 | 138,627,591 | 84 | 60 | Apache-2.0 | 2023-07-31T06:43:33 | 2018-06-25T17:23:55 | Python | UTF-8 | Python | false | false | 5,218 | py | from mstrio.utils.error_handlers import ErrorHandler
@ErrorHandler(
err_msg='Authentication error. Check user credentials or REST API URL and try again'
)
def login(connection):
"""Authenticate a user and create an HTTP session on the web server where
the user's MicroStrategy sessions are stored.
This request returns an authorization token (X-MSTR-AuthToken) which will be
submitted with subsequent requests. The body of the request contains
the information needed to create the session. The loginMode parameter in
the body specifies the authentication mode to use. You can authenticate with
one of the following authentication modes: Standard (1), Anonymous (8),
or LDAP (16). Authentication modes can be enabled through the System
Administration REST APIs, if they are supported by the deployment.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.post(
skip_expiration_check=True,
url=f'{connection.base_url}/api/auth/login',
data={
'username': connection.username,
'password': connection._Connection__password,
'loginMode': connection.login_mode,
'applicationType': 35,
},
)
@ErrorHandler(err_msg="Failed to logout.")
def logout(connection, error_msg=None, whitelist=None):
"""Close all existing sessions for the authenticated user.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.post(
skip_expiration_check=True,
url=f'{connection.base_url}/api/auth/logout',
headers={'X-MSTR-ProjectID': None},
)
def session_renew(connection):
"""Extends the HTTP and Intelligence Server sessions by resetting the
timeouts.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.put(
skip_expiration_check=True,
url=f'{connection.base_url}/api/sessions',
headers={'X-MSTR-ProjectID': None},
timeout=2.0,
)
def session_status(connection):
"""Checks Intelligence Server session status.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.get(
skip_expiration_check=True,
url=f'{connection.base_url}/api/sessions',
headers={'X-MSTR-ProjectID': None},
)
@ErrorHandler(err_msg='Could not get identity token.')
def identity_token(connection):
"""Create a new identity token.
An identity token is used to share an existing session with another
project, based on the authorization token for the existing
session.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.post(
url=f'{connection.base_url}/api/auth/identityToken',
)
def validate_identity_token(connection, identity_token):
"""Validate an identity token.
Args:
connection: MicroStrategy REST API connection object
identity_token: Identity token
Returns:
Complete HTTP response object.
"""
return connection.get(
url=f'{connection.base_url}/api/auth/identityToken',
headers={'X-MSTR-IdentityToken': identity_token},
)
@ErrorHandler(
err_msg='Error creating a new Web server session that shares an existing IServer '
'session.'
)
def delegate(connection, identity_token, whitelist=None):
"""Returns authentication token and cookies from given X-MSTR-
IdentityToken.
Args:
connection: MicroStrategy REST API connection object
identity_token: Identity token
whitelist: list of errors for which we skip printing error messages
Returns:
Complete HTTP response object.
"""
return connection.post(
skip_expiration_check=True,
url=f'{connection.base_url}/api/auth/delegate',
json={'loginMode': "-1", 'identityToken': identity_token},
)
@ErrorHandler(err_msg='Error getting privileges list.')
def user_privileges(connection):
"""Get the list of privileges for the authenticated user.
The response includes the name, ID, and description of each
privilege and specifies which projects the privileges are valid for.
Args:
connection: MicroStrategy REST API connection object
Returns:
Complete HTTP response object.
"""
return connection.get(url=f"{connection.base_url}/api/sessions/privileges")
@ErrorHandler(err_msg='Error getting info for authenticated user.')
def get_info_for_authenticated_user(connection, error_msg=None):
"""Get information for the authenticated user.
Args:
connection: MicroStrategy REST API connection object
error_msg (string, optional): Custom Error Message for Error Handling
Returns:
Complete HTTP response object.
"""
url = f'{connection.base_url}/api/sessions/userInfo'
return connection.get(url=url)
| [
"noreply@github.com"
] | noreply@github.com |
c241b675292580697fde86a8fafecba8ace59410 | 244856c712aaf0675aab546519a8f0552137f257 | /plantManagement/sensors/urls.py | aa124fb0603d9c1c3a05cdca68bae35eccfc8340 | [] | no_license | Sreepragnav16/Plant_Monitoring_System | 250199db9661cf000ef24c286a310262b9f6e056 | ac3990e1ff222c4f29aac0c7a4c2b1e9fb7300d7 | refs/heads/master | 2020-05-15T06:52:30.504201 | 2019-04-18T17:48:57 | 2019-04-18T17:48:57 | 182,131,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | from django.conf.urls import url
from . import views
app_name = 'sensors'
urlpatterns = [
# homepage
url(r'^$', views.index, name='index'),
#temperature
url(r'^temperature/$',views.temperature, name='temperature'),
#humidity
url(r'^humidity/$',views.humidity, name='humidity'),
#overhead tank
url(r'^OHT/$',views.OHT, name='OHT'),
#rain gauge
url(r'^rain/$',views.rain, name='rain'),
#weather station
url(r'^weather/$',views.weather, name='weather'),
#add new reading
url(r'^addreading/$', views.add_reading, name='addreading'),
#display particular plant info
url(r'^display/(?P<pid>[0-9]+)/$', views.display, name='display'),
url(r'^weather/display/(?P<pid>[0-9]+)/$', views.display, name='display'),
#soil moisture
url(r'^display/(?P<pid1>[0-9]+)/sm/(?P<pid>[0-9]+)/',views.sm, name='sm'),
url(r'^weather/display/(?P<pid1>[0-9]+)/sm/(?P<pid>[0-9]+)/',views.sm, name='sm'),
#add new plant
url(r'^addplant/$', views.addplant, name='addplant'),
#demo
url(r'^demo/$', views.demo, name='demo'),
#map
url(r'^map/',views.map, name='map'),
#about us
url(r'^about/$', views.about, name='about'),
# motorControl
url(r'^control/(?P<pid>[0-9]+)/$', views.motorControl, name='motorControl'),
]
| [
"noreply@github.com"
] | noreply@github.com |
862349a3c1000ce89313a3022db4edc1e1f3cf78 | b9d6de31eeaf92f77ed8cb48039b7d82963f249f | /student_companion/comments/migrations/0003_auto_20170129_2333.py | cc053db6657814674415909260fcf4a8a2de111f | [
"BSD-2-Clause"
] | permissive | tm-kn/student-companion-backend | 7c1364033a6edaf2f0d57fd4e9305abf679c698b | 5c0b12aee357b7b2dbaf6a5bb8710b9ecb501019 | refs/heads/master | 2021-03-30T15:53:58.542072 | 2017-03-13T19:45:46 | 2017-03-13T19:45:46 | 71,128,845 | 0 | 0 | null | 2016-10-25T14:36:14 | 2016-10-17T10:58:49 | Python | UTF-8 | Python | false | false | 629 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-29 23:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('comments', '0002_auto_20161206_1501'),
]
operations = [
migrations.AlterField(
model_name='placecomment',
name='place',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='place_comments', related_query_name='place_comment', to='places.Place', verbose_name='place'),
),
]
| [
"u1562595@unimail.hud.ac.uk"
] | u1562595@unimail.hud.ac.uk |
2ac2a7e1feba3b14b0453aeba17abc1736994421 | 337065b21aead25e5b69b3932d63e667799d2b7d | /liga09/src/liga.py | 5d52fae7ed46d3a7df7597d1e22c1eeeedee6d92 | [] | no_license | jels/ple-ieslosenlaces | 998e982290e56500652e17ec12d353679530dcbe | 92ba27e34ab10dc2018be411829856ad2912adb9 | refs/heads/master | 2021-01-10T21:18:24.635244 | 2010-05-11T09:05:23 | 2010-05-11T09:05:23 | 37,066,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 713 | py | # -*- encoding: utf-8 -*-
'''
Created on 19/11/2009
@author: dai
'''
from auxliga import *
from auxgrafico import *
# leer datos y preparar matriz
# datos_liga --> matriz con datos del fichero
datos_liga = crea_tabla(r'datos\liga09.csv')
# ej1. imprimir datos_liga: equipo y puntos
# por orden alfab�tico
puntos_equipos(datos_liga)
# ej2. imprimir datos_liga: equipo y puntos
# por orden en tabla de clasificaci�n
datos_liga.sort(ordena_puntos) # ordena matriz
print
print '*' * 20
print
puntos_equipos(datos_liga)
# imprime sólo nombres
nombres_equipos = nombres(datos_liga)
print sorted(nombres_equipos)
grafico(seis_primeros(datos_liga), 'Mejores equipos') | [
"morillas@02d82bf8-8a0b-11dd-8a42-c3ad82d480ce"
] | morillas@02d82bf8-8a0b-11dd-8a42-c3ad82d480ce |
eca1d0906ca94f9f81f2da44e42483721dc46ee6 | f45295baf6a46bf09669c382270ad1b3213781dc | /Code/check_resources.py | eb4320d69106446508ddad6c1c036012f13128ec | [] | no_license | arjun1886/Ethnicity_detection_DL | 8d2ab1e776f0dda8f8162e8dc334c861cd92145d | c8868089fe13d3eced4bd65d72b93362167e77d1 | refs/heads/master | 2020-12-21T03:24:49.617975 | 2020-01-26T09:50:00 | 2020-01-26T09:50:00 | 236,289,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,692 | py | __author__ = 'Douglas'
import urllib.request, os, bz2
dlib_facial_landmark_model_url = "http://ufpr.dl.sourceforge.net/project/dclib/dlib/v18.10/shape_predictor_68_face_landmarks.dat.bz2"
def download_file(url, dest):
file_name = url.split('/')[-1]
u = urllib.request.urlopen(url)
f = open(dest+"/"+file_name, 'wb')
meta = u.info()
file_size = int(meta.getheaders("Content-Length")[0])
print("Downloading: %s Size: %s (~%4.2fMB)") % (file_name, file_size, (file_size/1024./1024.))
file_size_dl = 0
block_sz = 8192
while True:
buffer = u.read(block_sz)
if not buffer:
break
file_size_dl += len(buffer)
f.write(buffer)
if((file_size_dl*100./file_size) % 5 <= 0.01):
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status = status + chr(8)*(len(status)+1)
print(status)
f.close()
print("Download complete!")
def extract_bz2(fpath):
print("Extracting...")
new_file = open(fpath[:-4], "wb")
file = bz2.BZ2File(fpath, 'rb')
data = file.read()
new_file.write(data)
new_file.close()
print("Done!")
def check_dlib_landmark_weights():
dlib_models_folder = "dlib_models"
if(not os.path.isdir(dlib_models_folder)):
os.mkdir(dlib_models_folder)
if(not os.path.isfile(dlib_models_folder+"/shape_predictor_68_face_landmarks.dat")):
if(not os.path.isfile(dlib_models_folder+"/shape_predictor_68_face_landmarks.dat.bz2")):
download_file(dlib_facial_landmark_model_url, dlib_models_folder)
extract_bz2(dlib_models_folder+"/shape_predictor_68_face_landmarks.dat.bz2") | [
"arjun.rajesh1886@gmail.com"
] | arjun.rajesh1886@gmail.com |
17f6cbf71b9e2f8f1a98abc1469ee319dbad1d40 | 3e1339020e63327db55716344a7e02c0d503d260 | /applications/Imaging/L1MIGRATIONwVP/results/segsalt/precooked/SConstruct | 18d42de777c5409c14338e19fe8c83b1f660e31f | [
"MIT"
] | permissive | 13299118606/SLIM-release-apps | 975287c6555fc4c6fe76dcea9f5feb9225a30449 | c286f07312289c7d50057ac9379d2da30eea760f | refs/heads/master | 2022-03-09T20:42:23.490954 | 2019-08-22T23:12:44 | 2019-08-22T23:12:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 814 | import os
from rsf.proj import *
########################################################################
# RETRIEVE DATA
########################################################################
# define SLIM FTP server information
FTPserver = {
'server': 'ftp.slim.gatech.edu',
'login': 'ftp',
'password':''}
loc = os.path.join('SoftwareRelease','Imaging','L1MIGRATIONwVP','results')
files = ['linear_RTM.mat','linear_trueQ_GaussianEnc2_denoised.mat','linear_wrongQ2_GaussianEnc2_denoised.mat','linear_estQ_GaussianEnc2.mat','linear_estQ_GaussianEnc2_denoised.mat','iwave_RTM.mat','iwave_finv_trueQ_GaussianEnc2_denoised.mat','iwave_finv_estQ_GaussianEnc2.mat','iwave_finv_estQ_GaussianEnc2_denoised.mat']
# fetch data from FTP server
for elm in files:
Fetch(elm,loc,FTPserver)
| [
"henryk_modzelewski@mac.com"
] | henryk_modzelewski@mac.com | |
74599c94d3dcaa1bce72b0d69b9593c7f982f3b9 | c3b058773f4ee1ab5e8010284c40e8af1d19e5e8 | /trexRun.py | 9ae0b520317d7297e4fee168435d5614d084ca46 | [] | no_license | cmtzco/steem_mm | 5485e5bf5827571c80934c0661c90ce327f5a46e | fcd791d703ce5b20d3bd609316742b638da767a4 | refs/heads/master | 2021-06-17T08:19:03.160676 | 2017-06-11T21:50:59 | 2017-06-11T21:50:59 | 93,909,995 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,735 | py | from gekko import Trex
import config as c
import urllib2
import logging
import random
import time
import sys
#0.47405412 BTC
logging.basicConfig(filename='gekko.log',level=logging.INFO)
RUNNING = True
while RUNNING:
try:
c.lotSize = random.uniform(0.75, 1.25)
b = Trex(c.TrexKey, c.TrexSecret)
orders = b.getOpenOrders()
while RUNNING:
ticker = b.getCoinTicker()
btc = b.getCoinBalance('BTC')
steem = b.getCoinBalance('STEEM')
steemRate = b.getBuyRate(ticker)
try:
orders = b.getOpenOrders()
if b.checkMinBuyAmount(ticker):
bid = b.getBid(ticker)
buy = b.makeBuyOrder(ticker)
btc_balance = b.getCoinBalance('BTC')
steem_balance = b.getCoinBalance('STEEM')
orders = b.getOpenOrders()
print "[INFO][TREX][MM][BUY] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(buy['result']['uuid'],
btc_balance,
steem_balance,
b.getNumOpenOrders(orders))
logging.info("[INFO][TREX][MM][BUY] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(buy['result']['uuid'],
btc_balance,
steem_balance,
b.getNumOpenOrders(orders)))
elif steem > c.trexLotSize:
ask = b.getAsk(ticker)
sell = b.makeSellOrder(ticker)
btc_balance = b.getCoinBalance('BTC')
steem_balance = b.getCoinBalance('STEEM')
orders = b.getOpenOrders()
print "[INFO][TREX][MM][SELL] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(sell['result']['uuid'],
btc_balance,
steem_balance,
b.getNumOpenOrders(orders))
logging.info(
"[INFO][TREX][MM][SELL] ORDERNUM: {}, BALANCES: {} BTC, {} STEEM, TOTAL OPEN ORDERS: {}".format(sell['result']['uuid'],
btc_balance,
steem_balance,
b.getNumOpenOrders(orders)))
# time.sleep(1)
# orders = b.getOpenOrders()
# for order in orders['result']:
# print "[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(order['OrderUuid']))
# logging.info("[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(order['OrderUuid'])))
else:
highscore = 0
ids = list()
for order in orders['result']:
ticker = b.getCoinTicker()
last = b.getLast(ticker)
furthestOrder = b.getFurthestOrderPercentage(order['limit'], last)
if furthestOrder > highscore:
highscore = furthestOrder
ids.append(order['result']['Uuid'])
print "[INFO][TREX][MM][CANCEL] Cancelling the following order IDs: {}".format(ids)
logging.info("[INFO][TREX][MM][CANCEL] Cancelling the following order IDs: {}".format(ids))
for id in ids:
print "[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(id))
logging.info("[INFO][TREX][MM][CANCEL][ORDER] Cancelled Order: {}".format(b.makeCancelOrder(id)))
orders = b.getOpenOrders()
print "[INFO][TREX][MM][ORDERS] Total Orders Open After Cancel:{}".format(b.getNumOpenOrders(orders))
logging.info("[INFO][TREX][MM][ORDERS] Total Orders Open After Cancel:{}".format(b.getNumOpenOrders(orders)))
print "[INFO][TREX][MM][ORDERS] Waiting for opportunity to buy/sell"
except urllib2.HTTPError as e:
print "[ERROR][TREX][MM][WHILE][HTTP] {}".format(e)
logging.error("[ERROR][TREX][MM][WHILE][HTTP] {}".format(e))
time.sleep(20)
continue
except KeyError as e:
print "[ERROR][TREX][MM][WHILE][KEY] {}".format(e)
logging.error("[ERROR][TREX][MM][WHILE][KEY] {}".format(e))
print "[ERROR][TREX][MM][WHILE][ORDERLIMIT]We've hit an order limit, waiting 20s to see if any orders fill{}".format(e)
logging.error("[ERROR][TREX][MM][WHILE][ORDERLIMIT]We've hit an order limit, waiting 20s to see if any orders fill {}".format(e))
time.sleep(20)
pass
except ValueError as e:
print "[ERROR][TREX][MM][WHILE][VALUE] {}".format(e)
logging.error("[ERROR][TREX][MM][WHILE][VALUE] {}".format(e))
pass
except TypeError as e:
print "[ERROR][TREX][MM][WHILE][TYPE] {}".format(e)
logging.error("[ERROR][TREX][MM][WHILE][TYPE] {}".format(e))
pass
except urllib2.HTTPError as e:
print "[ERROR][TREX][MM][MAIN] {}".format(e)
logging.error("[ERROR][TREX][MM][MAIN] {}".format(e))
time.sleep(20)
RUNNING = True
| [
"chris@cmtz.co"
] | chris@cmtz.co |
bbac7a89c8fce26e9c0cc1f44ccffe97946ed723 | 9a4babfb4abca418f3985387742613305bbd1975 | /ouds/article/views.py | 5fb06a392bf3241578121ffe1d18e3e2f843b430 | [] | no_license | joskid/ChunCu | 37f324669146bc134e4719fb73471268d9f900eb | dbbdf00848962efb65306b3baaac4b7ecba42f15 | refs/heads/master | 2021-01-15T21:39:03.680848 | 2012-03-16T11:48:13 | 2012-03-16T11:48:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,726 | py | # -*- coding: UTF-8 -*-
#===============================================================================
# Author: 骛之
# File Name: gd/member/admin.py
# Revision: 0.1
# Date: 2007-2-5 19:15
# Description:
#===============================================================================
import datetime
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.db.models import Q
from django.contrib.auth.decorators import login_required
from django.views.decorators.cache import cache_page
from ouds.settings import HOST_NAME, HOST_URL, ICON_SIZE, IMAGE_SIZE
from ouds.utils.comms import _md5_key
from ouds.article.models import Catalog, Tag, Topic, Entry, Comment
################################################
@cache_page(60 * 30)
def module(request, module, template_name = 'article/module.ouds'):
user = request.user
#topics = Topic.published.filter(catalog__module__exact = module)[:100]
return render_to_response(
template_name,
{
'user': user,
'module': module,
'catalog': None,
'tag': None,
#'topics': topics,
},
)
################################################
@cache_page(60 * 30)
def catalog(request, module, catalog, template_name = 'article/catalog_tag.ouds'):
user = request.user
catalog = Catalog.objects.get(module__exact = module, name__exact = catalog)
catalog.read_count += 1
catalog.save()
#topics = Topic.published.filter(catalog__exact = catalog)[:100]
return render_to_response(
template_name,
{
'user': user,
'module': module,
'catalog': catalog.name,
'tag': None,
#'topics': topics,
},
)
################################################
@cache_page(60 * 30)
def tag(request, module, catalog, tag, template_name = 'article/catalog_tag.ouds'):
user = request.user
tag = Tag.objects.get(catalog__name__exact = catalog, name__exact = tag)
tag.read_count += 1
tag.save()
#topics = Topic.published.filter(tags__exact = tag)[:100]
return render_to_response(
template_name,
{
'user': user,
'module': module,
'catalog': catalog,
'tag': tag.name,
#'topics': topics,
},
)
##################################
from ouds.utils.consts import IMG_TYPE, AI_DIR
from ouds.article.forms import TopicForm
@login_required
def add_topic(request, topic_form = TopicForm, template_name = 'article/add_topic.ouds'):
"""增加文章"""
user = request.user
if request.method == "POST":
data = request.POST
data['title'] = data['title'].strip()
now = datetime.datetime.now()
topic = Topic(id = _md5_key(now, user.username), profile = user.get_profile(), \
edit_date = now, is_approved = True) # is_recommended = True
topic_form = topic_form(data, instance = topic, auto_id = False)
if topic_form.is_valid():
topic = topic_form.save()
if request.FILES:
icon = request.FILES['icon']
if icon.size <= ICON_SIZE and (icon.name[-3:] in IMG_TYPE):
topic.icon.save(topic.id + icon.name[-4:], icon, save = True)
# 更新catalog
catalog = topic.catalog
catalog.post_count += 1
catalog.save()
# 标签处理
tags = data['tags'].strip().split()
for tag in tags:
# 增加tag
if not Tag.objects.filter(catalog__exact = catalog, name__exact = tag).exists():
Tag(catalog = catalog, name = tag).save()
# 更新tag和topic-tag
tag = Tag.objects.get(catalog__exact = catalog, name__exact = tag)
tag.post_count += 1
tag.save()
if not topic.tags.filter(name__exact = tag.name).exists():
topic.tags.add(tag)
return HttpResponseRedirect('/member/%s' % user.username)
else:
topic_form = topic_form(auto_id = False)
return render_to_response(
template_name,
{
'user': user,
'module': None,
'topic_form': topic_form,
},
)
##################################
from ouds.article.forms import CommentForm
def topic(request, module, catalog, year, month, day, id, template_name = 'article/topic.ouds'):
user = request.user
topic = Topic.objects.get(id__exact = id)
if request.method == 'POST':
entry_id = request.POST['entry_id']
else:
public_entries = topic.public_entries()
if public_entries:
entry_id = public_entries.latest('birth_date').id
else:
entry_id = None
try:
next_topic = topic.get_next_by_edit_date()
except Topic.DoesNotExist:
next_topic = None
try:
previous_topic = topic.get_previous_by_edit_date()
except Topic.DoesNotExist:
previous_topic = None
comments = topic.comments.all()
return render_to_response(
template_name,
{
'user': user,
'host_name': HOST_NAME,
'host_url': HOST_URL,
'module': module,
'catalog': catalog,
'topic': topic,
'entry_id': entry_id,
'next_topic': next_topic,
'previous_topic': previous_topic,
'comments': comments,
'comment_form': CommentForm(auto_id = False),
}
)
##################################
from ouds.article.forms import EntryForm
from ouds.utils.processimg import watermark
@login_required
def add_entry(request, topic_id, entry_form = EntryForm, template_name = 'article/add_entry.ouds'):
"""增加文章章节"""
user = request.user
if not Topic.objects.filter(id__exact = topic_id).exists():
return HttpResponseRedirect('/member/%s' % user.username)
else:
topic = Topic.objects.get(id__exact = topic_id)
if request.method == "POST":
data = request.POST
data['title'] = data['title'].strip()
entry = Entry(id = _md5_key(datetime.datetime.now(), user.username), topic = topic)
entry_form = entry_form(data, instance = entry, auto_id = False)
if entry_form.is_valid():
entry = entry_form.save()
if request.FILES:
image = request.FILES['image']
if image.size <= IMAGE_SIZE and (image.name[-3:] in IMG_TYPE):
entry.image.save(entry.id + image.name[-4:], image, save = True)
watermark(AI_DIR + entry.id + image.name[-4:]).save(AI_DIR + entry.id + image.name[-4:], quality = 90)
return HttpResponseRedirect('/member/%s' % user.username)
else:
entry_form = entry_form(auto_id = False)
return render_to_response(
template_name,
{
'user': user,
'module': None,
'entry_form': entry_form,
},
)
################################################
import random
from ouds.utils.consts import MODULE
def search(request, template_name = 'article/search.ouds'):
user = request.user
keywords = request.POST['keywords'].strip()
topics = Topic.published.filter(Q(title__icontains = keywords) | Q(description__icontains = keywords))
return render_to_response(
template_name,
{
'user': user,
'module': MODULE[random.randint(0, len(MODULE)-1)][0],
'keywords': keywords,
'topics': topics,
},
)
#######################################
def comment(request, topic_id, comment_form = CommentForm):
"""发表评论"""
data = request.POST
topic = Topic.objects.get(id__exact = topic_id)
comment = Comment(id = _md5_key(datetime.datetime.now(), data['author']), topic = topic, ip = request.META['REMOTE_ADDR'])
comment_form = comment_form(data, instance = comment, auto_id = False)
if comment_form.is_valid():
comment.save()
topic.comment_count += 1
topic.save()
#else:
# comment_form = comment_form(auto_id = False)
return HttpResponseRedirect(data['topic_url'])
| [
"ouds.cg@gmail.com"
] | ouds.cg@gmail.com |
f698177fc305cd817e720633840d5cc143725037 | d86aef9f61d2cce156f67ac2da76d7f18b4b881e | /Logic Gates/logic_gates_nn2.py | a30507ca574476c9724550015c31e1c87bddc5b7 | [] | no_license | dhan0779/ai-neural-networks | 353b95ab0bebad132ecc428bcb9dae37d54dd810 | 64fee9aa3346e0fe23d55a4a8007e3a43fd6ed42 | refs/heads/master | 2020-12-01T23:42:32.657842 | 2019-12-29T23:54:10 | 2019-12-29T23:54:10 | 230,816,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,406 | py | import sys,math,time,random
def main():
x = time.time()
real = []
inpfile = open(sys.argv[1],"r")
for line in inpfile:
fakelist = []
line = line.split(" ")
for ch in line:
if ch != "=>":
fakelist.append(int(ch))
real.append(fakelist)
input = []
for lis in real:
hi = []
for ch in lis:
hi.append(ch)
hi.pop()
hi.append(1)
input.append(hi)
weights = [[random.uniform(-2,2)]*(2*len(input[0])),[random.uniform(-2,2),random.uniform(-2,2)],[random.uniform(-2,2)]]
alpha = 0.3
totalerror = 1
iterations = 0
it10 = 0
while totalerror > 0.0009:
totalerror = 0
for i in range(0,len(input)):
ff = forwardfeed(input[i],weights,"T3")
errort = error(ff[len(ff)-1][0],real[i][len(real[i])-1])
totalerror += errort
weights = backpropagation(ff,weights,"T3",real[i][len(real[i])-1],alpha)
iterations+=1
if iterations != 0 and iterations%10 == 0 and totalerror > 0.1:
if abs(totalerror-it10) < 0.0001:
weights = [[random.uniform(-2, 2)] * (2 * len(input[0])),[random.uniform(-2, 2), random.uniform(-2, 2)], [random.uniform(-2, 2)]]
iterations = 0
else: it10 = totalerror+1-1
print("Layer cts:", [len(input[0]), 2, 1, 1])
print("Weights:")
print(weights[0])
print(weights[1])
print(weights[2])
print(totalerror)
def error(ffval,actual):
return 0.5*((actual-ffval)**2)
def transfer(input,x):
if input == "T1": return x
if input =="T2":
if x < 0: return 0
else: return x
if input == "T3": return 1/(1+math.e**-x)
if input == "T4": return (2 / (1 + math.exp(-1 * x))) - 1
def transfersderiv(input,x):
if input == "T1": return 1
if input =="T2":
if x < 0: return 0
else: return 1
if input == "T3": return x*(1-x)
if input == "T4": return (1-x**2)/2
def dot(list1,list2):
return sum(i[0] * i[1] for i in zip(list1, list2))
def forwardfeed(inputs,weights,transfers):
layerC = [inputs]
tmp = []
fin = []
for i in range(len(weights)):
current = weights[i]
next = []
if i != len(weights) - 1:
for j in range(len(current)):
tmp.append(weights[i][j])
if len(inputs) != 1 and j != 0 and (j+1) % (len(inputs)) == 0:
next.append(dot(tmp,inputs))
tmp = []
if len(inputs) == 1:
next.append(dot(tmp, inputs))
tmp = []
fin = []
for elem in next:
fin.append(transfer(transfers,elem))
next = fin
else:
fin = []
for z in range(len(inputs)):
fin.append(inputs[z]*current[z])
inputs = fin
layerC.append(inputs)
return layerC
def backpropagation(inputs,weight,transfersder,real,alpha):
newWeights = [[],[],[]]
E_list = [[],[],[]]
layer1 = []
for i in range(len(inputs)-1,0,-1):
if i == len(inputs)-2:
E_list[1].append((real-inputs[i][0])*weight[i][0]*transfersderiv(transfersder,inputs[i][0]))
newWeights[1].append(E_list[1][0]*inputs[1][0]*alpha+weight[1][0])
newWeights[1].append(E_list[1][0]*inputs[1][1]*alpha+weight[1][1])
elif i == len(inputs)-1:
E_list[2].append(real-inputs[i][0])
newWeights[2].append(E_list[2][0]*inputs[2][0]*alpha+weight[2][0])
else:
E_list[0].append((weight[i][0]*E_list[1][0])*transfersderiv(transfersder,inputs[i][0]))
E_list[0].append((weight[i][1]*E_list[1][0])*transfersderiv(transfersder,inputs[i][1]))
for j in range(int(len(weight[0])/2)):
layer1.append(E_list[0][0]*inputs[0][j]*alpha+weight[0][j])
layer1.append(E_list[0][1]*inputs[0][j]*alpha+weight[0][j+int(len(weight[0])/2)])
for i in range(0,len(layer1)):
if i%2== 0: newWeights[0].append(layer1[i])
for i in range(0,len(layer1)):
if i%2== 1: newWeights[0].append(layer1[i])
return newWeights
if __name__ == '__main__':
main() | [
"noreply@github.com"
] | noreply@github.com |
d59708d18019db8809fb41912e4fc24664b28503 | 13548d8c85e3f4b32181f1b54fb20bc81002491d | /lr_model.py | abe3e0ca3aecd0f13fdef9b45bb06135776e4efa | [] | no_license | sjtuprog/classification-models | ed9e5c32538566da3bf031dc4b4319821772b8e0 | 83b61569a26e232d0dec7db74035d1719412597b | refs/heads/master | 2021-07-11T13:56:37.064339 | 2017-10-15T05:09:45 | 2017-10-15T05:09:45 | 106,985,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,031 | py | from preprocessor.loader import *
import numpy as np
from preprocessor.utils import metrics
from sklearn.linear_model import LogisticRegression, LinearRegression
import os
def data_to_vector(data):
x_word, y = data
x = np.zeros(len(word_to_id))
for w in x_word:
x[w]+=1
return x, y
train, dev, test = load_file('corpus/example_data.json')
dico_words, word_to_id, id_to_word = word_mapping(train)
train_data = prepare_dataset(train, word_to_id)
test_data = prepare_dataset(test, word_to_id)
x_train = []
x_test = []
y_train = []
y_test = []
for t in train_data:
v, y = data_to_vector(t)
x_train.append(v)
y_train.append(y)
for t in test_data:
v, y = data_to_vector(t)
x_test.append(v)
y_test.append(y)
clf = LogisticRegression(C=1.0, dual=False, fit_intercept=True, intercept_scaling=1, class_weight='balanced',penalty='l2',n_jobs=4)
clf.fit(x_train,y_train)
y_predict = clf.predict(x_test)
a,p,r,f,auc = metrics(y_test, y_predict)
print 'Acc:%f, Prec:%f, Reca:%f, F1:%f, AUC:%f' %(a,p,r,f,auc)
| [
"sjtuprog@gmail.com"
] | sjtuprog@gmail.com |
01094b667d366115bc4a518070a10f4ac74ffa80 | 3423eb1ee4654222fc5b3e83489d4ef571f69308 | /sparkit_me_data_checking/models/vrf_verification_wizard.py | 21f552a9d91e47fcbffc4d4083a198516dfc12da | [] | no_license | janvierb/Sparkit | bd1f8fbd27228d6f0fcab19bddcc060fec40baba | 5db9f11be93bbd5fd379a3c07263f377114f5b2f | refs/heads/master | 2020-09-26T00:16:40.018403 | 2019-12-05T15:39:00 | 2019-12-05T15:39:00 | 226,121,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,263 | py | # -*- coding: utf-8 -*-
from openerp import models, fields, api
from openerp import exceptions
class sparkit_me_data_checking(models.TransientModel):
_name = 'sparkit.vrf_verification_wizard'
vrf_ids = fields.Many2many('sparkit.vrf', string="Visit Report Forms")
verified = fields.Boolean(string="Visit Report Form Verified and Attendance Information Entered?")
@api.multi
def do_mass_update(self):
self.ensure_one()
# else:
if self.verified:self.vrf_ids.write({'state':'approved'})
return True
@api.multi
def do_reopen_form(self):
self.ensure_one()
return {'type': 'ir.actions.act_window',
'res_model': self._name, # this model
'res_id': self.id, # the current wizard record
'view_type': 'form',
'view_mode': 'form',
'target': 'new'}
@api.multi
def do_populate_tasks(self):
self.ensure_one()
VRF = self.env['sparkit.vrf']
all_vrfs = VRF.search([('state', '!=', 'approved'), ('state', '!=', 'cancelled'), ('m_e_assistant_id', '=', self.env.uid)])
self.vrf_ids = all_vrfs
# reopen wizard form on same wizard record
return self.do_reopen_form()
| [
"janvierb@sparkmicrogrants.org"
] | janvierb@sparkmicrogrants.org |
9123cc72f71833fb8b4612137af9c487f54ecee8 | 3b63a9e4b00d69deb44a3e575f385217f936eedc | /prac_06/box_layout_demo.py | e208e4200afb0fa5c576746517b7bb9883327deb | [] | no_license | AbelLim/cp1404Practicals | 1e87ba6a76ea749fc54d51ec8fcc8435842190df | 0ebfa41176050ac8ef7f2ea5e2e33169ada15197 | refs/heads/master | 2020-03-06T22:40:18.154275 | 2018-05-31T10:03:40 | 2018-05-31T10:03:40 | 127,109,510 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 483 | py | from kivy.app import App
from kivy.lang import Builder
class BoxLayoutDemo(App):
    """Small Kivy demo: greet the user named in the input field."""

    def build(self):
        """Load the kv layout file and use it as the root widget."""
        self.title = "Box Layout Demo"
        self.root = Builder.load_file('box_layout.kv')
        return self.root

    def handle_greet(self):
        """Show a greeting for the name typed into the input field."""
        widgets = self.root.ids
        widgets.output_label.text = "Hello {}".format(widgets.input_name.text)

    def handle_clear(self):
        """Blank out both the output label and the name input."""
        widgets = self.root.ids
        widgets.output_label.text = ""
        widgets.input_name.text = ""


BoxLayoutDemo().run()
| [
"abel.lim@my.jcu.edu.au"
] | abel.lim@my.jcu.edu.au |
822472fe328593a9877481bba85b7d87cd7b60d3 | 642aff81fc7dcf253bd9d714234d6ad0b7f08e5f | /pymaginopolis/chunkyfile/common.py | bc0ea16a27a85a8436f019292d8d166d889a145a | [
"MIT"
] | permissive | frrabelo/pymaginopolis | b4a1d2707eb20eb942bb9c176614ab38136db491 | 022651ab9f6a809c754a5554114f5d1a3eca369b | refs/heads/master | 2023-03-18T01:40:32.747153 | 2021-02-28T03:57:31 | 2021-02-28T03:57:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,180 | py | import struct
from pymaginopolis.chunkyfile import model as model
from pymaginopolis.chunkyfile.model import Endianness, CharacterSet
GRPB_HEADER_SIZE = 20
CHARACTER_SETS = {
model.CharacterSet.ANSI: "latin1",
model.CharacterSet.UTF16LE: "utf-16le"
}
def get_string_size_format(characterset):
    """Return (struct format char, size-field byte width, bytes per character)
    for the length prefix of a Pascal string in the given character set."""
    # FUTURE: big endian
    is_utf16 = characterset in (model.CharacterSet.UTF16BE, model.CharacterSet.UTF16LE)
    return ("H", 2, 2) if is_utf16 else ("B", 1, 1)
def parse_pascal_string_with_encoding(data):
    """
    Read a character set followed by a pascal string
    :param data:
    :return: tuple containing string, number of bytes consumed and characterset
    """
    # The first two bytes select the character set of the string that follows.
    raw_charset = struct.unpack("<H", data[0:2])[0]
    character_set = model.CharacterSet(raw_charset)
    value, consumed = parse_pascal_string(character_set, data[2:])
    # Account for the two character-set bytes in the consumed count.
    return value, consumed + 2, character_set
def parse_pascal_string(characterset, data):
    """
    Read a Pascal string from a byte array using the given character set.
    :param characterset: Character set to use to decode the string
    :param data: binary data
    :return: tuple containing string and number of bytes consumed
    """
    size_fmt, size_width, char_width = get_string_size_format(characterset)
    if len(data) < size_width:
        raise FileParseException("String size truncated")
    # The prefix stores a character count; convert it to a byte count.
    char_count = struct.unpack("<" + size_fmt, data[0:size_width])[0]
    byte_count = char_count * char_width
    payload = data[size_width:size_width + byte_count]
    decoded = payload.decode(CHARACTER_SETS[characterset])
    return decoded, size_width + byte_count
def generate_pascal_string(characterset, value):
    """Encode *value* as a length-prefixed Pascal string in *characterset*.
    The prefix stores the character count, mirroring parse_pascal_string."""
    size_fmt, _, _ = get_string_size_format(characterset)
    encoded = value.encode(CHARACTER_SETS[characterset])
    return struct.pack("<" + size_fmt, len(value)) + encoded
class FileParseException(Exception):
    """Raised if a problem is found with the chunky file."""
def check_size(expected, actual, desc):
    """ Raise an exception if this part of the file is truncated """
    if actual >= expected:
        return
    raise FileParseException("%s truncated: expected 0x%x, got 0x%x" % (desc, expected, actual))
def parse_u24le(data):
    """ Parse a 24-bit little endian number """
    low, mid, high = data[0], data[1], data[2]
    return low | (mid << 8) | (high << 16)
def parse_endianness_and_characterset(data):
    """Read a 2-byte endianness marker followed by a 2-byte character set id."""
    check_size(4, len(data), "Endianness/characterset")
    raw_endian, raw_charset = struct.unpack("<2H", data)
    return model.Endianness(raw_endian), model.CharacterSet(raw_charset)
def tag_bytes_to_string(tag):
    """
    Convert the raw bytes for a tag into a string
    :param tag: bytes (eg. b'\x50\x4d\x42\x4d')
    :return: tag (eg. "MBMP")
    """
    # Tags are stored reversed (little-endian order). Decode with "latin1" --
    # the same codec this module maps CharacterSet.ANSI to in CHARACTER_SETS --
    # instead of "ansi", which is a Windows-only codec alias and raises
    # LookupError on other platforms. For the ASCII tag names involved the
    # result is identical.
    return tag[::-1].decode("latin1").rstrip("\x00")
def parse_grpb_list(data):
    """
    Parse a GRPB chunk
    :param data: GRPB chunk
    :return: tuple containing endianness, characterset, index entry size, item index and item heap
    """
    header = struct.unpack("<2H4I", data[0:GRPB_HEADER_SIZE])
    endianness, characterset, index_entry_size, number_of_entries, heap_size, unk1 = header
    endianness = Endianness(endianness)
    characterset = CharacterSet(characterset)

    # TODO: figure out what this field is
    if unk1 != 0xFFFFFFFF:
        raise NotImplementedError("can't parse this GRPB because unknown1 isn't 0xFFFFFFFF")

    # The heap follows the header; the index follows the heap.
    heap_start = GRPB_HEADER_SIZE
    heap_end = heap_start + heap_size
    heap = data[heap_start:heap_end]

    index_data = data[heap_end:heap_end + index_entry_size * number_of_entries]
    index_items = [
        index_data[i * index_entry_size:(i + 1) * index_entry_size]
        for i in range(number_of_entries)
    ]
    return endianness, characterset, index_entry_size, index_items, heap
| [
"1490287+benstone@users.noreply.github.com"
] | 1490287+benstone@users.noreply.github.com |
7aa41765cd6860e2540b6f799c4551cd82d47f48 | 34148545a20f0b9fe07860d1107e6aab2ec1f75d | /info_spider/Scrapy_History_Hanchao_V1_01/build/lib/Scrapy_History_Hanchao_V1_01/spiders/Zhuixue_01.py | 139bef56439c9928931b6c7045a6f1948b1c9a0b | [] | no_license | tangzhutao/chf | 9bb9fa9b6ad75f1b587364e1005922c5bdddb4ca | 4b249aee9689d3669306bbf020ad7fbb7e6b92bc | refs/heads/master | 2022-12-03T03:55:17.308231 | 2020-08-21T09:57:47 | 2020-08-21T09:57:47 | 288,969,437 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,641 | py | # -*- coding: utf-8 -*-
import scrapy, time, re
from scrapy.utils import request
from Scrapy_History_Hanchao_V1_01.items import InfoItem
import requests
from urllib3 import encode_multipart_formdata
from Scrapy_History_Hanchao_V1_01.ApolloConfig import IMAGES_STORE, SPIDER_NAME, UPLOADURL
class Zhuixue01Spider(scrapy.Spider):
    """Spider that scrapes Han-dynasty history articles from lishi.zhuixue.net.
    Crawls the first three pages of the /hanchao/ listing, follows every
    article link, and yields one ``InfoItem`` per article. Article images are
    re-uploaded through the project's upload service and the returned URLs
    are stored on the item.
    """
    name = 'Zhuixue_01'
    base_url = 'http://lishi.zhuixue.net'
    url_name = '追学网'  # human-readable site name
    def start_requests(self):
        """Seed the crawl with the first three listing pages."""
        for i in range(3):
            url = f'http://lishi.zhuixue.net/hanchao/list_43_{i + 1}.html'
            req = scrapy.Request(url=url, callback=self.parse, dont_filter=True)
            yield req
    def parse(self, response):
        """Follow every article link found on a listing page."""
        get_info = response.xpath('//div[@class="list1"]/li/a/@href').extract()
        for info in get_info:
            url = self.base_url + info
            req = scrapy.Request(url=url, callback=self.detail_parse, dont_filter=True)
            # Use the scrapy request fingerprint as a stable article id.
            news_id = request.request_fingerprint(req)
            req.meta.update({'news_id': news_id})
            yield req
    def detail_parse(self, response):
        """Extract one article page into an ``InfoItem``."""
        # Rebuild the request headers as a plain str->str dict so they can be
        # reused with the `requests` library when downloading images.
        headers = {}
        for k, v in response.request.headers.items():
            headers[k.decode()] = v[0].decode()
        title = response.xpath('//ul[@class="lisbt"]/li[1]/span/h1/text()').extract_first()
        # Publish date: the date part of the first "YYYY-MM-DD HH:MM"
        # timestamp found anywhere in the page body, if any.
        try:
            issue_time = re.findall(r'\d+-\d+-\d+ \d+:\d+', response.text)[0].split(' ')[0]
        except IndexError:
            issue_time = None
        content = response.xpath('//ul[@class="lisnr"]').extract_first()
        images_url = response.xpath('//ul[@class="lisnr"]//img/@src').extract()
        item = InfoItem()
        images = []
        if images_url:
            for image_url in images_url:
                # Resolve relative image paths against the site root.
                if 'http' in image_url:
                    link = image_url
                else:
                    link = self.base_url + image_url
                # Mirror the image through the upload service; keep the new URL.
                res = self.download_img(link, headers)
                if res['success']:
                    self.logger.info({'图片下载完成': link})  # image download finished
                    images.append(res['data']['url'])
                else:
                    self.logger.info({'图片下载失败': link})  # image download failed
        item['images'] = ','.join(images) if images else None
        item['category'] = '汉朝'  # "Han dynasty"
        item['content_url'] = response.url
        item['title'] = title
        item['issue_time'] = issue_time if issue_time else None
        item['information_source'] = '历史追学网'  # source site name
        item['sign'] = '19'  # hard-coded source code; downstream meaning not visible here
        item['news_id'] = response.meta['news_id']
        item['content'] = content
        item['author'] = None
        item['title_image'] = None
        item['attachments'] = None
        item['area'] = None
        item['address'] = None
        item['tags'] = None
        item['update_time'] = str(int(time.time() * 1000))  # scrape time, epoch ms
        item['source'] = None
        # Only emit the item if an article body was actually found.
        if content:
            yield item
        self.logger.info({'title': title, 'issue_time': issue_time})
    def download_img(self, url, headers):
        """Download *url* and re-upload it to the project's upload endpoint.
        Returns the upload service's parsed JSON response (expected to carry
        a 'success' flag and, on success, the new URL under data['url']).
        """
        resp = requests.get(url, headers=headers)
        file_name = url.split('/')[-1]
        file = {
            'file': (file_name, resp.content)
        }
        send_url = UPLOADURL + SPIDER_NAME
        # Hand-encode the multipart body so the POST carries the matching
        # Content-Type boundary header.
        encode_data = encode_multipart_formdata(file)
        file_data = encode_data[0]
        headers_from_data = {
            "Content-Type": encode_data[1]
        }
        response = requests.post(url=send_url, headers=headers_from_data, data=file_data).json()
        return response
return response
if __name__ == '__main__':
    # Convenience entry point: equivalent to running `scrapy crawl Zhuixue_01`.
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawl', 'Zhuixue_01'])
| [
"18819492919@163.com"
] | 18819492919@163.com |
ec9c0cd180f50fb23acae69744788f81a9bfa036 | 8ccf7e6a93256fd83fed2bb7bd4f8bbe13dc1f40 | /Assignment 3. Paxos/Simulation/Agents/Proposer.py | c35f8b2ea5e2ba44032b554a298ca176490310d9 | [
"MIT"
] | permissive | WailAbou/Distributed-Processing | 5e2b84edc86b6d709c2599d82434731c6fd64dd6 | 46a36f1fd51d6f8b35cc639eb8002d81d7e09f2b | refs/heads/main | 2023-05-28T05:52:39.790190 | 2021-06-14T00:57:08 | 2021-06-14T00:57:08 | 367,988,336 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,059 | py | from Simulation.Agents import Agent
from Simulation.Message import Message, MessageTypes
class Proposer(Agent):
    """Paxos proposer: counts promises and, on first majority, asks acceptors
    to accept the highest value seen so far."""

    max_id = 0  # highest agent id handed out so far; used by reset()

    def __init__(self, name, agent_id, value=None):
        super().__init__(name, agent_id, value)
        self.votes = 0
        self.majority = False
        self.suggested_value = None
        self.consensus = False
        Proposer.max_id = max(Proposer.max_id, agent_id + 1)

    def recieve_promise(self, message, majority):
        """Register one promise; when a majority is first reached, return a
        factory building an ACCEPT message for a given acceptor."""
        promised = message.source.value
        if promised:
            # Adopt the highest value any promising acceptor already holds.
            self.value = max(self.value, promised)
        self.votes += 1
        if self.votes < majority or self.majority:
            return None
        self.majority = True
        return lambda acceptor: Message(message.destination, acceptor, MessageTypes.ACCEPT)

    def recieve_accepted(self, message):
        """An acceptor accepted our value: consensus has been reached."""
        self.consensus = True

    def init_value(self, value):
        """Set both the working value and the originally suggested value."""
        self.value = value
        self.suggested_value = value

    def reset(self):
        """Start a new round with a fresh (higher) proposal id."""
        self.votes = 0
        self.majority = False
        self.agent_id = Proposer.max_id
| [
"abou.w@hotmail.com"
] | abou.w@hotmail.com |
b12c14f2d187174e8f714e4790ec36839780011f | ac5d55e43eb2f1fb8c47d5d2a68336eda181d222 | /Reservoir Sampling/382. Linked List Random Node.py | 535508fa3eecbcc13bfe833e95712b6200c347d5 | [] | no_license | tinkle1129/Leetcode_Solution | 7a68b86faa37a3a8019626e947d86582549374b3 | 1520e1e9bb0c428797a3e5234e5b328110472c20 | refs/heads/master | 2021-01-11T22:06:45.260616 | 2018-05-28T03:10:50 | 2018-05-28T03:10:50 | 78,925,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,831 | py | # - * - coding:utf8 - * - -
###########################################
# Author: Tinkle
# E-mail: shutingnjupt@gmail.com
# Name: Linked List Random Node.py
# Creation Time: 2017/9/24
###########################################
'''
Given a singly linked list, return a random node's value from the linked list. Each node must have the same probability of being chosen.
Follow up:
What if the linked list is extremely large and its length is unknown to you? Could you solve this efficiently without using extra space?
Example:
// Init a singly linked list [1,2,3].
ListNode head = new ListNode(1);
head.next = new ListNode(2);
head.next.next = new ListNode(3);
Solution solution = new Solution(head);
// getRandom() should return either 1, 2, or 3 randomly. Each element should have equal probability of returning.
solution.getRandom();
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
import random
class Solution(object):
    """Pick a uniformly random node value: count the list length once in the
    constructor, then draw a uniform index and walk to it on each call."""

    def __init__(self, head):
        """
        @param head The linked list's head.
        Note that the head is guaranteed to be not null, so it contains at least one node.
        :type head: ListNode
        """
        self.head = head
        # Count the nodes once so getRandom() can draw a uniform index.
        self.length = 0
        node = head
        while node:
            self.length += 1
            node = node.next

    def getRandom(self):
        """
        Returns a random node's value.
        :rtype: int
        """
        steps = random.randint(1, self.length) - 1
        node = self.head
        for _ in range(steps):
            node = node.next
        return node.val
# Your Solution object will be instantiated and called as such:
# obj = Solution(head)
# param_1 = obj.getRandom() | [
"496047829@qq.com"
] | 496047829@qq.com |
8e32eb64c0a7d5268003465a2906f21431987605 | c7713ed30e6edd751ccb811ad3fd48de30f94e33 | /WprimeToMuNu_M_2800_TuneCUETP8M1_13TeV_pythia8_cfi.py | e4a5272bebf0fe233280b78d8cc078d67e927a6c | [] | no_license | bdelacruz/usercode | 3be9fa8d3c761754c95a5c891c691dfd4baaa38d | 76cb706731cde5a4cfb0dec68c628ef39dc1408f | refs/heads/master | 2016-09-05T23:59:26.566827 | 2015-03-17T12:22:14 | 2015-03-17T12:22:14 | 32,382,473 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,375 | py | import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.Pythia8CUEP8M1Settings_cfi import *
# W' -> mu nu signal sample: Pythia8 production of a heavy charged gauge
# boson (W', PDG id 34) with mass 2800 GeV at sqrt(s) = 13 TeV, CUETP8M1 tune.
generator = cms.EDFilter("Pythia8GeneratorFilter",
    comEnergy = cms.double(13000.0),  # centre-of-mass energy in GeV
    crossSection = cms.untracked.double(0.020),  # bookkeeping cross section (pb, presumably -- confirm)
    filterEfficiency = cms.untracked.double(1),
    maxEventsToPrint = cms.untracked.int32(1),
    pythiaHepMCVerbosity = cms.untracked.bool(False),
    pythiaPylistVerbosity = cms.untracked.int32(1),
    PythiaParameters = cms.PSet(
        pythia8CommonSettingsBlock,
        pythia8CUEP8M1SettingsBlock,
        processParameters = cms.vstring(
            'Main:timesAllowErrors = 10000',
            'ParticleDecays:limitTau0 = on',
            'ParticleDecays:tauMax = 10',
            'Tune:ee 3',
            'Tune:pp 5',
            'NewGaugeBoson:ffbar2Wprime = on',  # enable f fbar -> W' production
            '34:m0 = 2800',  # W' mass in GeV
            '34:onMode = off',  # turn off all W' decay channels...
            '34:onIfAny = 13,14',  # ...then re-enable decays involving mu (13) / nu_mu (14)
        ),
        parameterSets = cms.vstring(
            'pythia8CommonSettings',
            'pythia8CUEP8M1Settings',
            'processParameters')
    )
)
# Sequence inserted into the production path by the request workflow.
ProductionFilterSequence = cms.Sequence(generator)
| [
"begona.delacruz@ciemat.es"
] | begona.delacruz@ciemat.es |
4db543d1def850a08bae32fd819afa667f24aa08 | 31ac8f22185155729c66f0197ad619291a75c577 | /physics.py | 9a25fd307452791a1aebf3d4d4e518d1278c6462 | [] | no_license | RebeccaEEMartin/hackathongame | 9408dcba178104adc92a86e679e864ca21346698 | a02bf47b30d3040d8dcdc3517215283e4c6dffb1 | refs/heads/master | 2021-07-22T05:20:10.744194 | 2017-10-29T13:45:46 | 2017-10-29T13:45:46 | 108,660,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,217 | py | from math import pi
import random
import pygame
import PyParticles
# --- Window / simulation setup -------------------------------------------
(width, height) = (400, 400)
screen = pygame.display.set_mode((width, height))
pygame.display.set_caption('Springs')
# Particle environment with movement, wall bounce, particle collisions,
# air drag and constant acceleration enabled.
universe = PyParticles.Environment((width, height))
universe.colour = (255,255,255)
universe.addFunctions(['move', 'bounce', 'collide', 'drag', 'accelerate'])
# (pi, 0.01): presumably (angle, magnitude) for the acceleration -- see PyParticles.
universe.acceleration = (pi, 0.01)
universe.mass_of_air = 0.02
# Six particles in two groups; particles 0 and 3 are created with fixed=True
# and explicit positions, the rest are placed by the environment.
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=True, x=175, y=100)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=True, x=225, y=100)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
universe.addParticles(mass=100, size=16, speed=2, elasticity=1, colour=(20,40,200), fixed=False)
# Link each group into a chain of springs (particle indices 0-1-2 and 3-4-5).
universe.addSpring(0,1, length=50, strength=1)
universe.addSpring(1,2, length=50, strength=1)
universe.addSpring(3,4, length=50, strength=1)
universe.addSpring(4,5, length=50, strength=1)
selected_particle = None  # particle currently dragged with the mouse, if any
paused = False  # toggled with the space bar
# --- Main loop ------------------------------------------------------------
running = True
while running:
    # debug: print(pygame.mouse.get_pos())
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_SPACE:
                # Toggle pause: indexing (True, False) with a bool flips it.
                paused = (True, False)[paused]
        elif event.type == pygame.MOUSEBUTTONDOWN:
            selected_particle = universe.findParticle(pygame.mouse.get_pos())
        elif event.type == pygame.MOUSEBUTTONUP:
            selected_particle = None
    # Drag the selected particle towards the cursor.
    if selected_particle:
        selected_particle.mouseMove(pygame.mouse.get_pos())
    # Advance the simulation unless paused.
    if not paused:
        universe.update()
    # Redraw: particles as filled circles, springs as anti-aliased lines.
    screen.fill(universe.colour)
    for p in universe.particles:
        pygame.draw.circle(screen, p.colour, (int(p.x), int(p.y)), p.size, 0)
    for s in universe.springs:
        pygame.draw.aaline(screen, (0,0,0), (int(s.p1.x), int(s.p1.y)), (int(s.p2.x), int(s.p2.y)))
    pygame.display.flip()
"kelvinfowler168@gmail.com"
] | kelvinfowler168@gmail.com |
68e09501a51d712d45387f738b12c0239a752984 | b4777bf27a6d10d0e5b1c51351f9ad14a049b5e7 | /results_discrete_paradigm_acc.py | 1f08f50c522ed31784d9ff4e831821666ace9b7e | [] | no_license | bioelectric-interfaces/cfir | 1216ba1b62935f99f8821ccce2577be9cf71c6b8 | 6034b5216352e5d933405bccbe9a67b9e89c4735 | refs/heads/master | 2022-07-12T10:45:17.758669 | 2020-03-10T13:34:10 | 2020-03-10T13:34:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,356 | py | """
Figure 5: Discrete paradigm accuracy for one subject with median SNR
"""
import pandas as pd
import pylab as plt
import numpy as np
import seaborn as sns
from filters import CFIRBandEnvelopeDetector, RectEnvDetector
from utils import magnitude_spectrum
from constants import FS, DELAY_RANGE
from sklearn.metrics import roc_auc_score, average_precision_score, balanced_accuracy_score
def get_classes(y, alpha, n_states=3):
y_pred = np.zeros(len(y))
if n_states == 3:
y_pred[y > np.percentile(y, alpha)] = 1
y_pred[y > np.percentile(y, 100 - alpha)] = 2
if n_states == 2:
y_pred[y > np.percentile(y, 100 - alpha)] = 1
return y_pred
dataset = 8
eeg_df = pd.read_pickle('data/train_test_data.pkl').query('subj_id=={}'.format(dataset))
envelope = eeg_df['an_signal'].abs().values
band = eeg_df[['band_left', 'band_right']].values[0]
magnitude_spectrum_train = {}
_, weights = magnitude_spectrum(eeg_df['eeg'].values, FS)
stats_df = pd.read_pickle('results/stats.pkl').query('subj_id=={}'.format(dataset))
flatui = {'cfir':'#0099d8', 'acfir': '#84BCDA', 'wcfir':'#FE4A49', 'rect':'#A2A79E'}
alpha=5
#DELAY_RANGE = np.linspace(-50, 100, 51, dtype=int)
acc = np.zeros(len(DELAY_RANGE))
acc_rand = np.zeros(len(DELAY_RANGE))
fig, axes = plt.subplots(2, 2, sharey='col', figsize=(6,6))
plt.subplots_adjust(hspace=0.4, wspace=0.4)
for j_n_states, n_states in enumerate([2, 3]):
y_true = get_classes(envelope, alpha, n_states)
for method_name, method_class in zip(
['cfir', 'rect', 'wcfir'],
[CFIRBandEnvelopeDetector, RectEnvDetector, CFIRBandEnvelopeDetector]):
acc = np.zeros(len(DELAY_RANGE))*np.nan
for d, DELAY in enumerate(DELAY_RANGE):
if method_name == 'rect' and DELAY <0: continue
params = stats_df.query('method=="{}" & metric=="corr" & delay=="{}"'.format(method_name, DELAY*2))['params'].values[0]
params['weights'] = weights if method_name == 'wcfir' else None
env_det = method_class(band=band, fs=FS, delay=DELAY, **params)
envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
# params = stats_df.query('method=="rect" & metric=="corr"')['params'].values[0]
# env_det = WHilbertFilter(band=band, fs=FS, delay=DELAY, **params)
# envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
#
# params = stats_df.query('method=="whilbert" & metric=="corr"')['params'].values[0]
# env_det = WHilbertFilter(band=band, fs=FS, **params)
# envelope_pred = np.abs(env_det.apply(eeg_df['eeg'].values))
#
# params = stats_df.query('method=="ffiltar" & metric=="corr"')['params'].values[0]
# env_det = RectEnvDetector(band, FS, params['n_taps'], DELAY)
# env_det = WHilbertFilter(band=band, fs=FS, **params)
y_pred = get_classes(envelope_pred, alpha, n_states)
acc[d] = balanced_accuracy_score(y_true, y_pred) if (method_name in ['cfir', 'wcfir'] or DELAY>=0) else np.nan
axes[j_n_states, 1].plot(DELAY_RANGE*2, acc*100, '.-', label=method_name, color=flatui[method_name])
axes[j_n_states, 1].plot(DELAY_RANGE*2, DELAY_RANGE*0 + balanced_accuracy_score(y_true, y_true*0)*100, '.-', color='k', label='all-high')
# [ax.set_xlabel('Delay, ms') for ax in axes[:, 1]]
axes[1, 1].set_xlabel('Delay, ms')
axes[1, 1].legend()
axes[0, 1].set_ylabel('Balanced accuracy score, %')
axes[1, 1].set_ylabel('Balanced accuracy score, %')
axes[0, 0].set_title('A. High/Other\n', x = 0)
axes[1, 0].set_title('B. High/Middle/Low\n', ha='right')
[ax.axvline(0, color='k', linestyle='--', alpha=0.5, zorder=-1000) for ax in axes[:, 1]]
# plt.plot(envelope0ms)
# plt.plot(envelope)
#
# sns.kdeplot(envelope, envelope0ms)
# plt.savefig('results/viz/res-classification.png', dpi=500)
ax = axes
# fig, ax = plt.subplots(2, figsize=(6, 6))
up = np.percentile(envelope*1e6, 100-alpha)
low = np.percentile(envelope*1e6, alpha)
t = np.arange(len(envelope))/500
ax[0, 0].plot(t-58, envelope*1e6, color='k')
ax[0, 0].axhline(np.percentile(envelope*1e6, 100-alpha), color='k', linestyle='--')
ax[0, 0].text(8.5, up+4, 'High', ha='center')
ax[0, 0].text(8.5, up-3, 'Other', ha='center')
# plt.axhspan(np.percentile(envelope*1e6, alpha), np.percentile(envelope*1e6, 100-alpha), color=flatui['cfir'], alpha=0.5)
# plt.axhspan(np.percentile(envelope*1e6, alpha), -1000, color=flatui['wcfir'], alpha=0.5)
ax[0, 0].set_ylim(-7, 20)
ax[0, 0].set_xlim(0, 10)
ax[0, 0].set_ylabel('Envelope, $uV$')
ax[1, 0].plot(t-58, envelope*1e6, color='k')
ax[1, 0].axhline(np.percentile(envelope*1e6, 100-alpha), color='k', linestyle='--')
ax[1, 0].axhline(np.percentile(envelope*1e6, alpha), color='k', linestyle='--')
ax[1, 0].text(8.5, up+4, 'High', ha='center')
ax[1, 0].text(8.5, up-3, 'Middle', ha='center')
ax[1, 0].text(8.5, low-5, 'Low', ha='center')
# plt.axhspan(np.percentile(envelope*1e6, alpha), np.percentile(envelope*1e6, 100-alpha), color=flatui['cfir'], alpha=0.5)
# plt.axhspan(np.percentile(envelope*1e6, alpha), -1000, color=flatui['wcfir'], alpha=0.5)
ax[1, 0].set_ylim(-7, 20)
ax[1, 0].set_xlim(0, 10)
ax[1, 0].set_ylabel('Envelope, $uV$')
ax[1, 0].set_xlabel('Time, s')
# plt.savefig('results/viz/res-classification-explained.png', dpi=500) | [
"n.m.smetanin@gmail.com"
] | n.m.smetanin@gmail.com |
5ada83d5248851904d6558b3dd0fd921087c75a9 | e194614b5dea1a31e32059eaa2f0db2f8c553c63 | /worker.py | 190324061bb13228b03cfd533434d5bc7967509d | [] | no_license | DanielCatz/JobPostReader | 88782dfca05639fbd0ed6d8726877d0228fbcb5f | 3b2bf3d9e90d30916b00a364c0f822fa7364fe07 | refs/heads/master | 2022-12-21T08:52:56.911602 | 2017-08-30T22:54:24 | 2017-08-30T22:54:24 | 101,807,062 | 1 | 0 | null | 2022-12-08T00:35:43 | 2017-08-29T21:09:03 | CSS | UTF-8 | Python | false | false | 313 | py | import os
import redis
from rq import Worker, Queue, Connection
# Queue names this worker should consume from.
listen = ['default']

# Connect to Redis; REDISTOGO_URL (Heroku add-on) overrides the local default.
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
conn = redis.from_url(redis_url)

if __name__ == '__main__':
    with Connection(conn):
        queues = [Queue(name) for name in listen]
        Worker(queues).work()
| [
"daniel.caterson@gmail.com"
] | daniel.caterson@gmail.com |
a930b53c0f8ebd9f8fefa2ec7b113c3b4b1fd605 | 152782c6c30fd7723204e1458546f8bc56a4f04c | /nvtabular/loader/tensorflow.py | 479f6ecc57998c12671a286700f1730d36e95563 | [
"Apache-2.0"
] | permissive | yingcanw/NVTabular | c09a6cecb84d97be094ad8ecbba3c9331cc03bb9 | 372e3bb1c8057aa497f8971466642170630571a4 | refs/heads/main | 2023-03-30T23:49:42.102664 | 2021-03-24T23:06:32 | 2021-03-24T23:06:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,674 | py | #
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import contextlib
import os
import tensorflow as tf
from nvtabular.io.dataset import Dataset
from nvtabular.loader.backend import DataLoader
from nvtabular.loader.tf_utils import configure_tensorflow, get_dataset_schema_from_feature_columns
from nvtabular.ops import _get_embedding_order
from_dlpack = configure_tensorflow()
def _validate_dataset(paths_or_dataset, batch_size, buffer_size, engine, reader_kwargs):
    """Coerce *paths_or_dataset* into an NVTabular ``Dataset``.

    Accepts an existing ``Dataset`` (returned as-is), a glob pattern string,
    or a list of filenames, and translates ``buffer_size`` into the matching
    reader keyword (row count or GPU-memory fraction).
    """
    # TODO: put this in parent class and allow
    # torch dataset to leverage as well?

    # Already a Dataset: nothing to build.
    if isinstance(paths_or_dataset, Dataset):
        return paths_or_dataset

    # Resolve a glob pattern or an explicit list of filenames.
    if isinstance(paths_or_dataset, str):
        files = tf.io.gfile.glob(paths_or_dataset)
        _is_empty_msg = "Couldn't find file pattern {} in directory {}".format(
            *os.path.split(paths_or_dataset)
        )
    else:
        # TODO: some checking around attribute error here?
        files = list(paths_or_dataset)
        _is_empty_msg = "paths_or_dataset list must contain at least one filename"

    assert isinstance(files, list)
    if not files:
        raise ValueError(_is_empty_msg)

    # Translate buffer_size into reader kwargs.
    # TODO: IMPORTANT -- should we divide everything by 3 to account
    # for extra copies laying around due to asynchronicity?
    reader_kwargs = reader_kwargs or {}
    if buffer_size < 1:
        # Fractional buffer sizes mean a share of GPU memory per partition.
        reader_kwargs["part_mem_fraction"] = buffer_size
    elif buffer_size < batch_size:
        reader_kwargs["batch_size"] = int(batch_size * buffer_size)
    else:
        reader_kwargs["batch_size"] = buffer_size
    return Dataset(files, engine=engine, **reader_kwargs)
def _validate_schema(feature_columns, cat_names, cont_names):
_uses_feature_columns = feature_columns is not None
_uses_explicit_schema = (cat_names is not None) or (cont_names is not None)
if _uses_feature_columns and _uses_explicit_schema:
raise ValueError(
"Passed `feature_column`s and explicit column names, must be one or the other"
)
elif _uses_feature_columns:
return get_dataset_schema_from_feature_columns(feature_columns)
elif _uses_explicit_schema:
cat_names = cat_names or []
cont_names = cont_names or []
return cat_names, cont_names
else:
raise ValueError(
"Must either pass a list of TensorFlow `feature_column`s "
"or explicit `cat_name` and `cont_name` column name lists."
)
class KerasSequenceLoader(tf.keras.utils.Sequence, DataLoader):
"""
Infinite generator used to asynchronously iterate through CSV or Parquet
dataframes on GPU by leveraging an NVTabular `Dataset`. Applies preprocessing
via NVTabular `Workflow` objects and outputs tabular dictionaries of TensorFlow
Tensors via `dlpack <https://github.com/dmlc/dlpack>`_. Useful for training tabular models
built in Keras and trained via
`tf.keras.Model.fit <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`_.
The data loading scheme is implemented by loading, preprocessing, and
batching data in an asynchronous thread. The amount of randomness in
shuffling is controlled by the `buffer_size` and `parts_per_chunk`
kwargs. At load time, sub-chunks of data with size controlled by
`buffer_size` are loaded from random partitions in the dataset,
and `parts_per_chunk` of them are concatenated into a single chunk,
shuffled, and split into batches. This means that each chunk has
`buffer_size*parts_per_chunk` rows, and due to the asynchronous
nature of the dataloader that means there are, including the batch
being processed by your network, `3*buffer_size*parts_per_chunk`
rows of data in GPU memory at any given time. This means that
for a fixed memory budget, using more `parts_per_chunk` will
come at the expense of smaller `buffer_size`, increasing the number
of reads and reducing throughput. The goal should be to maximize the
total amount of memory utilized at once without going OOM and with
the fewest number of reads to meet your epoch-level randomness needs.
An important thing to note is that TensorFlow's default behavior
is to claim all GPU memory for itself at initialziation time,
which leaves none for NVTabular to load or preprocess data.
As such, we attempt to configure TensorFlow to restrict
its memory allocation on a given GPU using the environment variables
`TF_MEMORY_ALLOCATION` and `TF_VISIBLE_DEVICE`. If `TF_MEMORY_ALLOCATION < 1`,
it will be assumed that this refers to a fraction of free GPU
memory on the given device. Otherwise, it will refer to an explicit
allocation amount in MB. `TF_VISIBLE_DEVICE` should be an integer GPU
index.
Iterator output is of the form `(dict(features), list(labels))`,
where each element of the features dict is a
`feature_name: feature_tensor` and each elemtn of the labels
list is a tensor, and all tensors are of shape `(batch_size, 1)`.
Note that this means vectorized continuous and multi-hot categorical
features are not currently supported.
The underlying NVTabular `Dataset` object is stored in the `data`
attribute, and should be used for updating NVTabular `Workflow`
statistics::
workflow = nvt.Workflow(...)
dataset = KerasSequenceLoader(...)
workflow.update_stats(dataset.data.to_iter(), record_stats=True)
Parameters
-------------
- paths_or_dataset: str or list(str)
Either a string representing a file pattern (see `tf.glob` for
pattern rules), a list of filenames to be iterated through, or
a Dataset object, in which case `buffer_size`, `engine`, and
`reader_kwargs` will be ignored
- batch_size: int
Number of samples to yield at each iteration
- label_names: list(str)
Column name of the target variable in the dataframe specified by
`paths_or_dataset`
- feature_columns: list(tf.feature_column) or None
A list of TensorFlow feature columns representing the inputs
exposed to the model to be trained. Columns with parent columns
will climb the parent tree, and the names of the columns in the
unique set of terminal columns will be used as the column names.
If left as None, must specify `cat_names` and `cont_names`
- cat_names: list(str) or None
List of categorical column names. Ignored if `feature_columns` is
specified
- cont_names: list(str) or None
List of continuous column names. Ignored if `feature_columns` is
specified
- engine: {'csv', 'parquet', None}, default None
String specifying the type of read engine to use. If left as `None`,
will try to infer the engine type from the file extension.
- shuffle: bool, default True
Whether to shuffle chunks of batches before iterating through them.
- buffer_size: float or int
If `0 < buffer_size < 1`, `buffer_size` will refer to the fraction of
total GPU memory to occupy with a buffered chunk. If `1 < buffer_size <
batch_size`, the number of rows read for a buffered chunk will
be equal to `int(buffer_size*batch_size)`. Otherwise, if `buffer_size >
batch_size`, `buffer_size` rows will be read in each chunk (except for
the last chunk in a dataset, which will, in general, be smaller).
Larger chunk sizes will lead to more efficieny and randomness,
but require more memory.
- devices: None
Which GPU devices to load from. Ignored for now
- parts_per_chunk: int
Number of dataset partitions with size dictated by `buffer_size`
to load and concatenate asynchronously. More partitions leads to
better epoch-level randomness but can negatively impact throughput
- reader_kwargs: dict
extra kwargs to pass when instantiating the underlying
`nvtabular.Dataset`
"""
_use_nnz = True
def __init__(
self,
paths_or_dataset,
batch_size,
label_names,
feature_columns=None,
cat_names=None,
cont_names=None,
engine=None,
shuffle=True,
buffer_size=0.1,
devices=None,
parts_per_chunk=1,
reader_kwargs=None,
):
dataset = _validate_dataset(
paths_or_dataset, batch_size, buffer_size, engine, reader_kwargs
)
cat_names, cont_names = _validate_schema(feature_columns, cat_names, cont_names)
# sort the ccolumns to avoid getting incorrect output
# (https://github.com/NVIDIA/NVTabular/issues/412)
cat_names = _get_embedding_order(cat_names)
cont_names = _get_embedding_order(cont_names)
assert devices is None or len(devices) == 1 # TODO: figure out multi-gpu support
devices = devices or [0]
DataLoader.__init__(
self,
dataset,
cat_names,
cont_names,
label_names,
batch_size,
shuffle,
parts_per_chunk=parts_per_chunk,
devices=devices,
)
def __len__(self):
"""
recreating since otherwise Keras yells at you
"""
# TODO: what's a better way to do this inheritance
# of the appropriate methods? A Metaclass?
return DataLoader.__len__(self)
def __getitem__(self, idx):
"""
implemented exclusively for consistency
with Keras model.fit. Does not leverage
passed idx in any way
"""
try:
return DataLoader.__next__(self)
except StopIteration:
# TODO: I would like to do a check for idx == 0
# here, but that requires that tf.keras.Model.fit
# be called with shuffle=False, and that seems
# small enough that it would be too easy to miss
# for many users. That said, blind reinitialization
# is probably irresponsible, so worth thinking
# of something better here
DataLoader.__iter__(self)
return DataLoader.__next__(self)
    @contextlib.contextmanager
    def _get_device_ctx(self, dev):
        """No-op device context manager: yield `dev` without entering a
        ``tf.device`` scope (see the retained commented-out code for why).
        """
        # with tf.device("/device:GPU:{}".format(dev)) as tf_device:
        #     # tf.device changes the cupy cuda device, which breaks us on multigpu
        #     # force cupy to still use the device we expect
        #     cupy.cuda.Device(dev).use()
        #     yield tf_device
        # commenting out since device statements cause
        # RuntimeErrors when exiting if two dataloaders
        # are running at once (e.g. train and validation)
        yield dev
    def _split_fn(self, tensor, idx, axis=0):
        # TensorFlow backend for the base DataLoader's tensor-splitting hook.
        return tf.split(tensor, idx, axis=axis)
    @property
    def _LONG_DTYPE(self):
        # Integer dtype the base DataLoader uses for categorical columns.
        return tf.int64
    @property
    def _FLOAT32_DTYPE(self):
        # Floating-point dtype the base DataLoader uses for continuous columns.
        return tf.float32
    def _to_tensor(self, gdf, dtype=None):
        """Convert a GPU dataframe/series `gdf` to a TensorFlow tensor via
        DLPack (zero-copy where possible).

        Returns None for an empty frame. `dtype` is currently unused.
        NOTE(review): assumes `gdf` resides on the GPU TensorFlow will use —
        confirm against the device handling above.
        """
        if gdf.empty:
            return
        # checks necessary because of this bug
        # https://github.com/tensorflow/tensorflow/issues/42660
        if len(gdf.shape) == 1 or gdf.shape[1] == 1:
            dlpack = gdf.to_dlpack()
        elif gdf.shape[0] == 1:
            dlpack = gdf.values[0].toDlpack()
        else:
            # transpose so the exported buffer has the layout tf expects
            dlpack = gdf.values.T.toDlpack()

        # catch error caused by tf eager context
        # not being initialized
        try:
            x = from_dlpack(dlpack)
        except AssertionError:
            tf.random.uniform((1,))  # any eager op initializes the context
            x = from_dlpack(dlpack)

        if gdf.shape[0] == 1:
            # batch size 1 so got squashed to a vector
            x = tf.expand_dims(x, 0)
        elif len(gdf.shape) == 1 or len(x.shape) == 1:
            # sort of a generic check for any other
            # len(shape)==1 case, could probably
            # be more specific
            x = tf.expand_dims(x, -1)
        elif gdf.shape[1] > 1:
            # matrix which means we had to transpose
            # for the bug above, so untranspose
            x = tf.transpose(x)
        return x
def _handle_tensors(self, cats, conts, labels):
X = {}
for tensor, names in zip([cats, conts], [self.cat_names, self.cont_names]):
lists = {}
if isinstance(tensor, tuple):
tensor, lists = tensor
names = [i for i in names if i not in lists]
# break list tuples into two keys, with postfixes
# TODO: better choices for naming?
list_columns = [i for i in lists.keys()]
for column in list_columns:
values, nnzs = lists.pop(column)
lists[column + "__values"] = values
lists[column + "__nnzs"] = nnzs
# now add in any scalar tensors
if len(names) > 1:
tensors = tf.split(tensor, len(names), axis=1)
lists.update({name: x for name, x in zip(names, tensors)})
elif len(names) == 1:
lists[names[0]] = tensor
X.update(lists)
# TODO: use dict for labels as well?
# would require output layers to match naming
if len(self.label_names) > 1:
labels = tf.split(labels, len(self.label_names), axis=1)
return X, labels
class KerasSequenceValidater(tf.keras.callbacks.Callback):
    """Keras callback that evaluates the model's metrics on a validation
    dataloader at the end of every epoch, recording them in `logs` under
    ``val_<metric-name>`` keys.

    (Name kept as-is for backward compatibility despite the "Validater"
    spelling.)
    """

    # Tell Keras this callback can consume the raw (tensor-valued) logs dict.
    _supports_tf_logs = True

    def __init__(self, dataloader):
        self.dataloader = dataloader

    def on_epoch_end(self, epoch, logs=None):
        """Run one full pass over `self.dataloader`, updating the model's
        metrics, then write each metric's result into `logs`.

        FIX: the previous signature used a mutable default (``logs={}``),
        which is shared across calls; use None and build a fresh dict.
        """
        if logs is None:
            logs = {}
        for X, y_true in self.dataloader:
            y_pred = self.model(X)
            # TODO: how do we want to handle the multi-output case?
            for metric in self.model.metrics:
                metric.update_state(y_true, y_pred)
        for metric in self.model.metrics:
            logs["val_" + metric.name] = metric.result().numpy()
        return logs
| [
"noreply@github.com"
] | noreply@github.com |
dde20ff95398266eb94923d9536dbe91b7e82d0c | 0b385cb36c601e483b77ba06f397c7dd66be9e70 | /day07/part1.py | ddb701bb43adc324b437e34a070ada479cb4cd7a | [] | no_license | Sebastian-/advent-of-code-2019 | 3cdddc8442a58c77e48d6e35e79ab5b7b38ec1d7 | 8adce696553f4c00c09de066ae67eed5e35fa4c0 | refs/heads/master | 2020-09-27T07:57:53.477125 | 2019-12-10T22:17:17 | 2019-12-10T22:17:17 | 226,469,518 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | import itertools
def getOpCode(i):
    """Return the opcode of Intcode instruction `i`: its last two decimal digits."""
    digits = str(i)
    return int(digits[-2:])
def getParaModes(i):
    """Return the parameter-mode digits of Intcode instruction `i`.

    The digits left of the two opcode digits are kept in written order and
    left-padded with zeros to at least two entries, so that ``modes[-1]`` is
    the mode of the first parameter and ``modes[-2]`` that of the second.
    """
    modes = [int(digit) for digit in str(i)[:-2]]
    padding = [0] * max(0, 2 - len(modes))
    return padding + modes
def getOperand(program, addr, mode):
    """Fetch one instruction operand, or None when a lookup is out of range.

    Mode 1 is immediate (the value stored at `addr` itself); any other mode
    is position mode (the value at the address stored at `addr`).
    """
    try:
        value = program[addr]
        return value if mode == 1 else program[value]
    except IndexError:
        # Reads past the end of memory simply yield a missing operand.
        return None
def execute(program, inputs):
    """Run an Intcode program in place until it halts or emits output.

    Mutates `program`. Opcode 3 consumes values from the front of `inputs`.
    Returns the first value produced by opcode 4, or None when the program
    halts (opcode 99) without producing output.
    """
    pc = 0
    while True:
        instruction = program[pc]
        opcode = getOpCode(instruction)
        modes = getParaModes(instruction)
        # Decode both potential operands up front; getOperand tolerates
        # reads past the end of memory by returning None.
        first = getOperand(program, pc + 1, modes[-1])
        second = getOperand(program, pc + 2, modes[-2])

        if opcode == 99:  # halt
            return
        if opcode == 1:  # add
            program[program[pc + 3]] = first + second
            pc += 4
        elif opcode == 2:  # multiply
            program[program[pc + 3]] = first * second
            pc += 4
        elif opcode == 3:  # read one input value
            program[program[pc + 1]] = int(inputs.pop(0))
            pc += 2
        elif opcode == 4:  # output: hand the value straight back to the caller
            return first
        elif opcode == 5:  # jump-if-true
            pc = second if first != 0 else pc + 3
        elif opcode == 6:  # jump-if-false
            pc = second if first == 0 else pc + 3
        elif opcode == 7:  # less-than
            program[program[pc + 3]] = 1 if first < second else 0
            pc += 4
        elif opcode == 8:  # equals
            program[program[pc + 3]] = 1 if first == second else 0
            pc += 4
def execute_sequence(program, inputs):
    """Run one amplifier per phase setting in `inputs`, chaining outputs.

    Each amplifier executes a fresh copy of `program` with
    ``[phase, previous_output]`` as its input queue; the final amplifier's
    output (the thruster signal) is returned. `inputs` is consumed.
    """
    signal = 0
    while inputs:
        memory = program.copy()
        signal = execute(memory, [inputs.pop(0), signal])
    return signal
def main():
    """Load the Intcode program from input.txt and print the maximum
    thruster signal over all permutations of phase settings 0-4."""
    with open('input.txt') as program_file:
        program = [int(token) for token in program_file.read().split(',')]
    print(program)

    max_thrust = 0
    for phases in itertools.permutations([0, 1, 2, 3, 4]):
        max_thrust = max(max_thrust, execute_sequence(program, list(phases)))
    print(max_thrust)


if __name__ == "__main__":
    main()
| [
"hmurgu@hotmail.com"
] | hmurgu@hotmail.com |
41124f0b638323fe0d56147e5d6b6fd13511885f | 2334ce5d9f1a151262ca6822e166ae5074f7e7b8 | /boj_lecture/dp/part1/boj11053.py | 806fa42aa23a6337c459809c32edb39aac068e07 | [] | no_license | passionCodingTest/Injeong | 6c9330360c7ef11d6dc05b1990db7d5b20bf3443 | b812f19b8733bc64e319ad81ee53edaf5290989f | refs/heads/main | 2023-06-22T16:33:22.509163 | 2021-07-27T12:55:31 | 2021-07-27T12:55:31 | 341,564,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 250 | py | import sys
input = sys.stdin.readline

n = int(input())
sequence = list(map(int, input().split()))

# Classic O(n^2) longest-increasing-subsequence DP:
# longest[i] = length of the longest strictly increasing subsequence ending at i.
longest = [1] * n
for i in range(n):
    for j in range(i):
        if sequence[i] > sequence[j]:
            longest[i] = max(longest[i], longest[j] + 1)

print(max(longest))
"injeong410@gmail.com"
] | injeong410@gmail.com |
b9003cef7f46933dcddd21d28e39822b4d63acb2 | e3a61e3353b8f20f56fc3adbb3d84ea500f798da | /Code/dummyReduce.py | 38d553d56f27261cbf068d64fbe3004b53d13ec7 | [] | no_license | JamieThomson97/Cloud-Computing | 11522966f26b48a0b4c903c6a7b733fd480e440e | 5fd988e0f8f8e02524cc605943ddb52806e1bac0 | refs/heads/master | 2020-04-03T21:53:50.985201 | 2018-11-21T21:09:51 | 2018-11-21T21:09:51 | 155,585,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,438 | py | #!/usr/bin/env python
# Reduce step: stdin carries tab-separated Key/Value pairs, where the Key is
# a word's letters sorted lexicographically and the Value is the actual word.
# Prints every group of two or more distinct words sharing a Key, i.e. every
# set of anagrams seen in the input.
import sys

# Map from sorted-letter signature to the distinct words seen with it.
anagram_groups = {}

for line in sys.stdin:
    fields = line.split("\t")
    signature = fields[0].strip("\r\n")
    word = fields[1].strip("\r\n")
    # Register the word under its signature, skipping duplicates.
    members = anagram_groups.setdefault(signature, [])
    if word not in members:
        members.append(word)

# Only signatures with at least one genuine anagram pair are reported.
for signature in anagram_groups:
    group = anagram_groups[signature]
    if len(group) > 1:
        print(str(group))
| [
"j.thomson-15@student.lboro.ac.uk"
] | j.thomson-15@student.lboro.ac.uk |
19a7c46c69e57295cfca3ac8ae09ffd075ac82a6 | c005eb04da66147c2e7e7de7e5d106ad6bb114c2 | /codes/exercise.py | a30dad2f78d8fd1cb7b1044806fbc1096b114586 | [] | no_license | maydaycha/thesis | 9bc9875599827ab421f6dc9349cb9f698161115b | 2a5b2c33d8c4b0dc18bf18a846c5b291b4d1fa11 | refs/heads/master | 2021-05-03T10:02:23.620563 | 2015-07-23T18:16:08 | 2015-07-23T18:16:08 | 32,448,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,664 | py | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm

# Load iris and keep only classes 1 and 2 with the first two features,
# giving a 2-D binary classification problem that is easy to plot.
iris = datasets.load_iris()
X = iris.data
y = iris.target

X = X[y != 0, :2]
y = y[y != 0]

n_sample = len(X)

# Shuffle deterministically so the train/test split is reproducible.
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)  # FIX: np.float was removed in NumPy >= 1.24

# 90/10 train/test split.
# FIX: slice indices must be integers; `X[:.9 * n_sample]` raises a
# TypeError on modern Python/NumPy.
n_train = int(0.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]

# fit the model once per kernel and plot each decision surface
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
    print("fig_num: %d" % fig_num)
    print("kernel" + kernel)
    clf = svm.SVC(kernel=kernel, gamma=10)
    clf.fit(X_train, y_train)

    plt.figure(fig_num)
    plt.clf()
    plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)

    # Circle out the test data
    plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)

    plt.axis('tight')
    x_min = X[:, 0].min()
    x_max = X[:, 0].max()
    y_min = X[:, 1].min()
    y_max = X[:, 1].max()

    # Evaluate the decision function on a 200x200 grid for the contour plot.
    XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
    Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])

    # Put the result into a color plot
    Z = Z.reshape(XX.shape)
    plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
    plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
                levels=[-.5, 0, .5])

    plt.title(kernel)

plt.show()
| [
"maydaychaaaa@gmail.com"
] | maydaychaaaa@gmail.com |
0598b8fd9500c32a0495c33197d6df04676bd050 | fe771c763cfad64820b6954f63999b325525d003 | /app/models.py | 8c8b83fe60c7167e30011de961e387c6654af341 | [
"MIT"
] | permissive | plenario/plenario | 69c5c1f87ce398a6c501a1aab8797bf539c9f0a6 | 0808cd90b88c37f11a40445bd200d4740dd4dfa9 | refs/heads/master | 2021-11-13T07:42:34.499848 | 2021-11-11T02:54:26 | 2021-11-11T02:54:26 | 97,568,258 | 68 | 14 | MIT | 2020-05-06T01:09:15 | 2017-07-18T07:33:59 | HTML | UTF-8 | Python | false | false | 1,273 | py | from app import db
from sqlalchemy.dialects.postgresql import JSON
import enum
class VoteType(enum.Enum):
    """Possible outcomes of a senator's vote on a proposition.

    Member values are the human-readable Portuguese labels stored and
    displayed by the application; they are part of runtime behavior and
    must not be translated.
    """
    # NOTE(review): __tablename__ has no effect on a plain enum.Enum (this is
    # not a SQLAlchemy model) — presumably left over; confirm before removing.
    __tablename__ = 'votetype'
    positive = "A favor"
    negative = "Contra"
    absence = "Ausência"
    abstention = "Abstenção"
class Senator(db.Model):
    """A senator, with affiliation data and social-media handles."""
    __tablename__ = 'senator'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), index=True, unique=True)  # full name, unique
    party = db.Column(db.String(30), index=True)  # presumably party abbreviation — verify
    state = db.Column(db.String(5), index=True)   # presumably state (UF) abbreviation — verify
    description = db.Column(db.Text)
    source = db.Column(db.String(120))  # presumably where the data was obtained — verify
    twitter = db.Column(db.String(120), unique=True)
    facebook = db.Column(db.String(120), unique=True)
    instagram = db.Column(db.String(120), unique=True)
class Proposition(db.Model):
    """A proposition (bill/measure) that senators vote on."""
    __tablename__ = 'proposition'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(120), index=True, unique=True)
    description = db.Column(db.Text)
    date = db.Column(db.DateTime)  # presumably the voting date — verify against loader
class Vote(db.Model):
    """One senator's vote on one proposition (join table with the outcome)."""
    __tablename__ = 'vote'
    id = db.Column(db.Integer, primary_key=True)
    vote = db.Column(db.Enum(VoteType))  # outcome: favor/against/absent/abstained
    senator = db.Column(db.Integer, db.ForeignKey('senator.id'))
    proposition = db.Column(db.Integer, db.ForeignKey('proposition.id'))
| [
"schwendler@gmail.com"
] | schwendler@gmail.com |
ca674d56b645b5721ff9210287a3026a3c86b84d | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2801/58758/256072.py | 829cc7621c561a24efea43b99bb9b2ba608d94f2 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 237 | py | n = int(input())
nums = [int(x) for x in input().split()]
nums.sort()
flag = False
for i in range(0, len(nums)-2):
if nums[i] + nums[i+1] > nums[i+2]:
flag = True
break
if flag:
print('YES')
else:
print('NO')
| [
"1069583789@qq.com"
] | 1069583789@qq.com |
76c78b98b9dca510bcb2a7cf815e747ee72e0281 | 6c5f20372604ade5153f54f55b29926e53f51ede | /CodiciSorgentiMButtu/cap6/myenum/06/test_myenum.py | acd32e2750030161dea599871164eab548d8d073 | [] | no_license | Johnny1809/Esercizi-Python | d38dd102c18134230ed9260f1a0739677b533ccc | f4a4d79d0518f0630a8631ba51591baa0b3ce552 | refs/heads/main | 2023-08-14T10:22:57.487917 | 2021-09-30T16:10:37 | 2021-09-30T16:10:37 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,390 | py | import unittest
from myenum import *
class TestPasta(unittest.TestCase):
    """Unit tests for the MyEnum enumeration implementation."""

    def setUp(self):
        # A plain enumeration with three distinct members.
        class Pasta(MyEnum):
            spaghetti = 1
            lasagne = 2
            tagliatelle = 3
        self.Pasta = Pasta

        # An enumeration where `tagliatelle` repeats the value of
        # `spaghetti`, so it must become an alias.
        class PastaAlias(MyEnum):
            spaghetti = 1
            lasagne = 2
            tagliatelle = 1
        self.PastaAlias = PastaAlias

    def test_membersOrder(self):
        """Members must be ordered following their definition order."""
        self.assertListEqual(['spaghetti', 'lasagne', 'tagliatelle'], list(self.Pasta.__members__))

    def test_isInstance(self):
        """Members must be instances of the Pasta class."""
        for member in self.Pasta.__members__.values():
            self.assertIsInstance(member, self.Pasta)

    def test_memberAttributes(self):
        """The `name` and `value` attributes of a member must be correct."""
        self.assertEqual(self.Pasta.spaghetti.name, 'spaghetti')
        self.assertEqual(self.Pasta.spaghetti.value, 1)

    def test_noHomonym(self):
        """Two members must not share the same name."""
        namespace = Namespace({'spaghetti': 1})
        self.assertRaises(KeyError, namespace.update, {'spaghetti': 1})

    def test_doNotChange(self):
        """Members can be neither reassigned nor deleted."""
        self.assertRaises(AttributeError, setattr, self.Pasta, 'spaghetti', 2)
        self.assertRaises(AttributeError, delattr, self.Pasta, 'spaghetti')

    def test_aliases(self):
        """A member with the same value as an existing one must be an alias."""
        self.assertIs(self.PastaAlias.spaghetti, self.PastaAlias.tagliatelle)

    def test_iterable(self):
        """Enumerations must be iterable objects."""
        self.assertCountEqual(self.Pasta.__members__.values(), list(self.Pasta))

    def test_aliasAndIterations(self):
        """Aliases must not appear when iterating over the enumeration."""
        desired = [self.PastaAlias.spaghetti, self.PastaAlias.lasagne]
        self.assertListEqual(desired, list(self.PastaAlias))

    def test_getitem(self):
        """Pasta['member_name'] must return the member itself."""
        self.assertIs(self.Pasta.spaghetti, self.Pasta['spaghetti'])
# Allow running this test module directly with `python test_myenum.py`.
if __name__ == '__main__':
    unittest.main()
| [
"89039573+Johnny1809@users.noreply.github.com"
] | 89039573+Johnny1809@users.noreply.github.com |
91550b4f5fdd38d817fb48cbdf64b89d252cf433 | a42dc61014a8d81d93a7a3403b94dab0c48e3b4c | /IB/code/option_chain_example_1_tws.py | e5ff27d6a19cc261fe0d3d4fcca97c6693c877bd | [] | no_license | AndSemenoff/andsemenoff.github.io | 2e3ae881dd2ec93dc58f04a12e6b533fd857aca6 | 154ef0cb9f1d304631e90268e443ca9c0b81b696 | refs/heads/master | 2023-08-18T05:00:57.731584 | 2023-08-11T17:47:50 | 2023-08-11T17:47:50 | 41,863,663 | 0 | 2 | null | 2015-11-22T16:04:19 | 2015-09-03T14:16:33 | JavaScript | UTF-8 | Python | false | false | 1,171 | py | from ibapi.client import EClient
from ibapi.wrapper import EWrapper
from ibapi.contract import Contract
from threading import Timer
class TestApp(EWrapper, EClient):
    """Minimal IB API client that requests the AAPL option chain for a
    given expiry month and prints each contract's details as they arrive."""

    def __init__(self):
        EClient.__init__(self, self)

    def error(self, reqId, errorCode, errorString):
        # EWrapper callback: print any API error/notification messages.
        print("Error: ", reqId, " ", errorCode, " ", errorString)

    def nextValidId(self, orderId):
        # First callback received after connecting; safe point to start requests.
        self.start()

    def contractDetails(self, reqId, contractDetails):
        # One callback per option contract matching the request.
        print("contractDetails: ", reqId, " ", contractDetails, "\n")

    def contractDetailsEnd(self, reqId):
        # Fired once all matching contracts for reqId have been delivered.
        print("\ncontractDetails End\n")

    def start(self):
        """Request contract details for all AAPL options in the target month."""
        contract = Contract()
        contract.symbol = "AAPL"
        contract.secType = "OPT"
        contract.exchange = "SMART"
        contract.currency = "USD"
        contract.lastTradeDateOrContractMonth = "202203"  # March 2022 (original comment said "June 2022", which did not match)
        self.reqContractDetails(1, contract)

    def stop(self):
        # Signal the message loop to finish and drop the TWS connection.
        self.done = True
        self.disconnect()
def main():
    """Connect to a locally running TWS/Gateway (paper-trading port 7497)
    and process messages until the timed stop fires."""
    app = TestApp()
    app.nextOrderId = 0
    app.connect("127.0.0.1", 7497, 0)
    # Stop the app after 4 seconds regardless of how many replies arrived.
    Timer(4, app.stop).start()
    app.run()

if __name__ == "__main__":
    main()
"andsemenoff@yandex.ru"
] | andsemenoff@yandex.ru |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.