diff --git "a/data/dataset_Nowcasting.csv" "b/data/dataset_Nowcasting.csv" new file mode 100644--- /dev/null +++ "b/data/dataset_Nowcasting.csv" @@ -0,0 +1,18186 @@ +"keyword","repo_name","file_path","file_extension","file_size","line_count","content","language" +"Nowcasting","covid-19-Re/estimateR","NEWS.md",".md","141","10","NEWS +================ + +# estimateR 0.2 +* Added functions for simulating incidence data +* Various fixes + +# estimateR 0.1 +Initial beta release +","Markdown" +"Nowcasting","covid-19-Re/estimateR","LICENSE.md",".md","34904","596","GNU General Public License +========================== + +_Version 3, 29 June 2007_ +_Copyright © 2007 Free Software Foundation, Inc. <>_ + +Everyone is permitted to copy and distribute verbatim copies of this license +document, but changing it is not allowed. + +## Preamble + +The GNU General Public License is a free, copyleft license for software and other +kinds of works. + +The licenses for most software and other practical works are designed to take away +your freedom to share and change the works. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change all versions of a +program--to make sure it remains free software for all its users. We, the Free +Software Foundation, use the GNU General Public License for most of our software; it +applies also to any other work released this way by its authors. You can apply it to +your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General +Public Licenses are designed to make sure that you have the freedom to distribute +copies of free software (and charge for them if you wish), that you receive source +code or can get it if you want it, that you can change the software or use pieces of +it in new free programs, and that you know you can do these things. + +To protect your rights, we need to prevent others from denying you these rights or +asking you to surrender the rights. Therefore, you have certain responsibilities if +you distribute copies of the software, or if you modify it: responsibilities to +respect the freedom of others. + +For example, if you distribute copies of such a program, whether gratis or for a fee, +you must pass on to the recipients the same freedoms that you received. You must make +sure that they, too, receive or can get the source code. And you must show them these +terms so they know their rights. + +Developers that use the GNU GPL protect your rights with two steps: **(1)** assert +copyright on the software, and **(2)** offer you this License giving you legal permission +to copy, distribute and/or modify it. + +For the developers' and authors' protection, the GPL clearly explains that there is +no warranty for this free software. For both users' and authors' sake, the GPL +requires that modified versions be marked as changed, so that their problems will not +be attributed erroneously to authors of previous versions. + +Some devices are designed to deny users access to install or run modified versions of +the software inside them, although the manufacturer can do so. This is fundamentally +incompatible with the aim of protecting users' freedom to change the software. The +systematic pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we have designed +this version of the GPL to prohibit the practice for those products. 
If such problems +arise substantially in other domains, we stand ready to extend this provision to +those domains in future versions of the GPL, as needed to protect the freedom of +users. + +Finally, every program is threatened constantly by software patents. States should +not allow patents to restrict development and use of software on general-purpose +computers, but in those that do, we wish to avoid the special danger that patents +applied to a free program could make it effectively proprietary. To prevent this, the +GPL assures that patents cannot be used to render the program non-free. + +The precise terms and conditions for copying, distribution and modification follow. + +## TERMS AND CONDITIONS + +### 0. Definitions + +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + +“The Program” refers to any copyrightable work licensed under this +License. Each licensee is addressed as “you”. “Licensees” and +“recipients” may be individuals or organizations. + +To “modify” a work means to copy from or adapt all or part of the work in +a fashion requiring copyright permission, other than the making of an exact copy. The +resulting work is called a “modified version” of the earlier work or a +work “based on” the earlier work. + +A “covered work” means either the unmodified Program or a work based on +the Program. + +To “propagate” a work means to do anything with it that, without +permission, would make you directly or secondarily liable for infringement under +applicable copyright law, except executing it on a computer or modifying a private +copy. Propagation includes copying, distribution (with or without modification), +making available to the public, and in some countries other activities as well. + +To “convey” a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through a computer +network, with no transfer of a copy, is not conveying. + +An interactive user interface displays “Appropriate Legal Notices” to the +extent that it includes a convenient and prominently visible feature that **(1)** +displays an appropriate copyright notice, and **(2)** tells the user that there is no +warranty for the work (except to the extent that warranties are provided), that +licensees may convey the work under this License, and how to view a copy of this +License. If the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + +### 1. Source Code + +The “source code” for a work means the preferred form of the work for +making modifications to it. “Object code” means any non-source form of a +work. + +A “Standard Interface” means an interface that either is an official +standard defined by a recognized standards body, or, in the case of interfaces +specified for a particular programming language, one that is widely used among +developers working in that language. + +The “System Libraries” of an executable work include anything, other than +the work as a whole, that **(a)** is included in the normal form of packaging a Major +Component, but which is not part of that Major Component, and **(b)** serves only to +enable use of the work with that Major Component, or to implement a Standard +Interface for which an implementation is available to the public in source code form. 
+A “Major Component”, in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system (if any) on which +the executable work runs, or a compiler used to produce the work, or an object code +interpreter used to run it. + +The “Corresponding Source” for a work in object code form means all the +source code needed to generate, install, and (for an executable work) run the object +code and to modify the work, including scripts to control those activities. However, +it does not include the work's System Libraries, or general-purpose tools or +generally available free programs which are used unmodified in performing those +activities but which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for the work, and +the source code for shared libraries and dynamically linked subprograms that the work +is specifically designed to require, such as by intimate data communication or +control flow between those subprograms and other parts of the work. + +The Corresponding Source need not include anything that users can regenerate +automatically from other parts of the Corresponding Source. + +The Corresponding Source for a work in source code form is that same work. + +### 2. Basic Permissions + +All rights granted under this License are granted for the term of copyright on the +Program, and are irrevocable provided the stated conditions are met. This License +explicitly affirms your unlimited permission to run the unmodified Program. The +output from running a covered work is covered by this License only if the output, +given its content, constitutes a covered work. This License acknowledges your rights +of fair use or other equivalent, as provided by copyright law. + +You may make, run and propagate covered works that you do not convey, without +conditions so long as your license otherwise remains in force. You may convey covered +works to others for the sole purpose of having them make modifications exclusively +for you, or provide you with facilities for running those works, provided that you +comply with the terms of this License in conveying all material for which you do not +control copyright. Those thus making or running the covered works for you must do so +exclusively on your behalf, under your direction and control, on terms that prohibit +them from making any copies of your copyrighted material outside their relationship +with you. + +Conveying under any other circumstances is permitted solely under the conditions +stated below. Sublicensing is not allowed; section 10 makes it unnecessary. + +### 3. Protecting Users' Legal Rights From Anti-Circumvention Law + +No covered work shall be deemed part of an effective technological measure under any +applicable law fulfilling obligations under article 11 of the WIPO copyright treaty +adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention +of such measures. + +When you convey a covered work, you waive any legal power to forbid circumvention of +technological measures to the extent such circumvention is effected by exercising +rights under this License with respect to the covered work, and you disclaim any +intention to limit operation or modification of the work as a means of enforcing, +against the work's users, your or third parties' legal rights to forbid circumvention +of technological measures. + +### 4. 
Conveying Verbatim Copies + +You may convey verbatim copies of the Program's source code as you receive it, in any +medium, provided that you conspicuously and appropriately publish on each copy an +appropriate copyright notice; keep intact all notices stating that this License and +any non-permissive terms added in accord with section 7 apply to the code; keep +intact all notices of the absence of any warranty; and give all recipients a copy of +this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer +support or warranty protection for a fee. + +### 5. Conveying Modified Source Versions + +You may convey a work based on the Program, or the modifications to produce it from +the Program, in the form of source code under the terms of section 4, provided that +you also meet all of these conditions: + +* **a)** The work must carry prominent notices stating that you modified it, and giving a +relevant date. +* **b)** The work must carry prominent notices stating that it is released under this +License and any conditions added under section 7. This requirement modifies the +requirement in section 4 to “keep intact all notices”. +* **c)** You must license the entire work, as a whole, under this License to anyone who +comes into possession of a copy. This License will therefore apply, along with any +applicable section 7 additional terms, to the whole of the work, and all its parts, +regardless of how they are packaged. This License gives no permission to license the +work in any other way, but it does not invalidate such permission if you have +separately received it. +* **d)** If the work has interactive user interfaces, each must display Appropriate Legal +Notices; however, if the Program has interactive interfaces that do not display +Appropriate Legal Notices, your work need not make them do so. + +A compilation of a covered work with other separate and independent works, which are +not by their nature extensions of the covered work, and which are not combined with +it such as to form a larger program, in or on a volume of a storage or distribution +medium, is called an “aggregate” if the compilation and its resulting +copyright are not used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work in an aggregate +does not cause this License to apply to the other parts of the aggregate. + +### 6. Conveying Non-Source Forms + +You may convey a covered work in object code form under the terms of sections 4 and +5, provided that you also convey the machine-readable Corresponding Source under the +terms of this License, in one of these ways: + +* **a)** Convey the object code in, or embodied in, a physical product (including a +physical distribution medium), accompanied by the Corresponding Source fixed on a +durable physical medium customarily used for software interchange. 
+* **b)** Convey the object code in, or embodied in, a physical product (including a +physical distribution medium), accompanied by a written offer, valid for at least +three years and valid for as long as you offer spare parts or customer support for +that product model, to give anyone who possesses the object code either **(1)** a copy of +the Corresponding Source for all the software in the product that is covered by this +License, on a durable physical medium customarily used for software interchange, for +a price no more than your reasonable cost of physically performing this conveying of +source, or **(2)** access to copy the Corresponding Source from a network server at no +charge. +* **c)** Convey individual copies of the object code with a copy of the written offer to +provide the Corresponding Source. This alternative is allowed only occasionally and +noncommercially, and only if you received the object code with such an offer, in +accord with subsection 6b. +* **d)** Convey the object code by offering access from a designated place (gratis or for +a charge), and offer equivalent access to the Corresponding Source in the same way +through the same place at no further charge. You need not require recipients to copy +the Corresponding Source along with the object code. If the place to copy the object +code is a network server, the Corresponding Source may be on a different server +(operated by you or a third party) that supports equivalent copying facilities, +provided you maintain clear directions next to the object code saying where to find +the Corresponding Source. Regardless of what server hosts the Corresponding Source, +you remain obligated to ensure that it is available for as long as needed to satisfy +these requirements. +* **e)** Convey the object code using peer-to-peer transmission, provided you inform +other peers where the object code and Corresponding Source of the work are being +offered to the general public at no charge under subsection 6d. + +A separable portion of the object code, whose source code is excluded from the +Corresponding Source as a System Library, need not be included in conveying the +object code work. + +A “User Product” is either **(1)** a “consumer product”, which +means any tangible personal property which is normally used for personal, family, or +household purposes, or **(2)** anything designed or sold for incorporation into a +dwelling. In determining whether a product is a consumer product, doubtful cases +shall be resolved in favor of coverage. For a particular product received by a +particular user, “normally used” refers to a typical or common use of +that class of product, regardless of the status of the particular user or of the way +in which the particular user actually uses, or expects or is expected to use, the +product. A product is a consumer product regardless of whether the product has +substantial commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + +“Installation Information” for a User Product means any methods, +procedures, authorization keys, or other information required to install and execute +modified versions of a covered work in that User Product from a modified version of +its Corresponding Source. The information must suffice to ensure that the continued +functioning of the modified object code is in no case prevented or interfered with +solely because modification has been made. 
+ +If you convey an object code work under this section in, or with, or specifically for +use in, a User Product, and the conveying occurs as part of a transaction in which +the right of possession and use of the User Product is transferred to the recipient +in perpetuity or for a fixed term (regardless of how the transaction is +characterized), the Corresponding Source conveyed under this section must be +accompanied by the Installation Information. But this requirement does not apply if +neither you nor any third party retains the ability to install modified object code +on the User Product (for example, the work has been installed in ROM). + +The requirement to provide Installation Information does not include a requirement to +continue to provide support service, warranty, or updates for a work that has been +modified or installed by the recipient, or for the User Product in which it has been +modified or installed. Access to a network may be denied when the modification itself +materially and adversely affects the operation of the network or violates the rules +and protocols for communication across the network. + +Corresponding Source conveyed, and Installation Information provided, in accord with +this section must be in a format that is publicly documented (and with an +implementation available to the public in source code form), and must require no +special password or key for unpacking, reading or copying. + +### 7. Additional Terms + +“Additional permissions” are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. Additional +permissions that are applicable to the entire Program shall be treated as though they +were included in this License, to the extent that they are valid under applicable +law. If additional permissions apply only to part of the Program, that part may be +used separately under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + +When you convey a copy of a covered work, you may at your option remove any +additional permissions from that copy, or from any part of it. (Additional +permissions may be written to require their own removal in certain cases when you +modify the work.) You may place additional permissions on material, added by you to a +covered work, for which you have or can give appropriate copyright permission. 
+ +Notwithstanding any other provision of this License, for material you add to a +covered work, you may (if authorized by the copyright holders of that material) +supplement the terms of this License with terms: + +* **a)** Disclaiming warranty or limiting liability differently from the terms of +sections 15 and 16 of this License; or +* **b)** Requiring preservation of specified reasonable legal notices or author +attributions in that material or in the Appropriate Legal Notices displayed by works +containing it; or +* **c)** Prohibiting misrepresentation of the origin of that material, or requiring that +modified versions of such material be marked in reasonable ways as different from the +original version; or +* **d)** Limiting the use for publicity purposes of names of licensors or authors of the +material; or +* **e)** Declining to grant rights under trademark law for use of some trade names, +trademarks, or service marks; or +* **f)** Requiring indemnification of licensors and authors of that material by anyone +who conveys the material (or modified versions of it) with contractual assumptions of +liability to the recipient, for any liability that these contractual assumptions +directly impose on those licensors and authors. + +All other non-permissive additional terms are considered “further +restrictions” within the meaning of section 10. If the Program as you received +it, or any part of it, contains a notice stating that it is governed by this License +along with a term that is a further restriction, you may remove that term. If a +license document contains a further restriction but permits relicensing or conveying +under this License, you may add to a covered work material governed by the terms of +that license document, provided that the further restriction does not survive such +relicensing or conveying. + +If you add terms to a covered work in accord with this section, you must place, in +the relevant source files, a statement of the additional terms that apply to those +files, or a notice indicating where to find the applicable terms. + +Additional terms, permissive or non-permissive, may be stated in the form of a +separately written license, or stated as exceptions; the above requirements apply +either way. + +### 8. Termination + +You may not propagate or modify a covered work except as expressly provided under +this License. Any attempt otherwise to propagate or modify it is void, and will +automatically terminate your rights under this License (including any patent licenses +granted under the third paragraph of section 11). + +However, if you cease all violation of this License, then your license from a +particular copyright holder is reinstated **(a)** provisionally, unless and until the +copyright holder explicitly and finally terminates your license, and **(b)** permanently, +if the copyright holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + +Moreover, your license from a particular copyright holder is reinstated permanently +if the copyright holder notifies you of the violation by some reasonable means, this +is the first time you have received notice of violation of this License (for any +work) from that copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + +Termination of your rights under this section does not terminate the licenses of +parties who have received copies or rights from you under this License. 
If your +rights have been terminated and not permanently reinstated, you do not qualify to +receive new licenses for the same material under section 10. + +### 9. Acceptance Not Required for Having Copies + +You are not required to accept this License in order to receive or run a copy of the +Program. Ancillary propagation of a covered work occurring solely as a consequence of +using peer-to-peer transmission to receive a copy likewise does not require +acceptance. However, nothing other than this License grants you permission to +propagate or modify any covered work. These actions infringe copyright if you do not +accept this License. Therefore, by modifying or propagating a covered work, you +indicate your acceptance of this License to do so. + +### 10. Automatic Licensing of Downstream Recipients + +Each time you convey a covered work, the recipient automatically receives a license +from the original licensors, to run, modify and propagate that work, subject to this +License. You are not responsible for enforcing compliance by third parties with this +License. + +An “entity transaction” is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an organization, or +merging organizations. If propagation of a covered work results from an entity +transaction, each party to that transaction who receives a copy of the work also +receives whatever licenses to the work the party's predecessor in interest had or +could give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if the predecessor +has it or can get it with reasonable efforts. + +You may not impose any further restrictions on the exercise of the rights granted or +affirmed under this License. For example, you may not impose a license fee, royalty, +or other charge for exercise of rights granted under this License, and you may not +initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging +that any patent claim is infringed by making, using, selling, offering for sale, or +importing the Program or any portion of it. + +### 11. Patents + +A “contributor” is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The work thus +licensed is called the contributor's “contributor version”. + +A contributor's “essential patent claims” are all patent claims owned or +controlled by the contributor, whether already acquired or hereafter acquired, that +would be infringed by some manner, permitted by this License, of making, using, or +selling its contributor version, but do not include claims that would be infringed +only as a consequence of further modification of the contributor version. For +purposes of this definition, “control” includes the right to grant patent +sublicenses in a manner consistent with the requirements of this License. + +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license +under the contributor's essential patent claims, to make, use, sell, offer for sale, +import and otherwise run, modify and propagate the contents of its contributor +version. + +In the following three paragraphs, a “patent license” is any express +agreement or commitment, however denominated, not to enforce a patent (such as an +express permission to practice a patent or covenant not to sue for patent +infringement). 
To “grant” such a patent license to a party means to make +such an agreement or commitment not to enforce a patent against the party. + +If you convey a covered work, knowingly relying on a patent license, and the +Corresponding Source of the work is not available for anyone to copy, free of charge +and under the terms of this License, through a publicly available network server or +other readily accessible means, then you must either **(1)** cause the Corresponding +Source to be so available, or **(2)** arrange to deprive yourself of the benefit of the +patent license for this particular work, or **(3)** arrange, in a manner consistent with +the requirements of this License, to extend the patent license to downstream +recipients. “Knowingly relying” means you have actual knowledge that, but +for the patent license, your conveying the covered work in a country, or your +recipient's use of the covered work in a country, would infringe one or more +identifiable patents in that country that you have reason to believe are valid. + +If, pursuant to or in connection with a single transaction or arrangement, you +convey, or propagate by procuring conveyance of, a covered work, and grant a patent +license to some of the parties receiving the covered work authorizing them to use, +propagate, modify or convey a specific copy of the covered work, then the patent +license you grant is automatically extended to all recipients of the covered work and +works based on it. + +A patent license is “discriminatory” if it does not include within the +scope of its coverage, prohibits the exercise of, or is conditioned on the +non-exercise of one or more of the rights that are specifically granted under this +License. You may not convey a covered work if you are a party to an arrangement with +a third party that is in the business of distributing software, under which you make +payment to the third party based on the extent of your activity of conveying the +work, and under which the third party grants, to any of the parties who would receive +the covered work from you, a discriminatory patent license **(a)** in connection with +copies of the covered work conveyed by you (or copies made from those copies), or **(b)** +primarily for and in connection with specific products or compilations that contain +the covered work, unless you entered into that arrangement, or that patent license +was granted, prior to 28 March 2007. + +Nothing in this License shall be construed as excluding or limiting any implied +license or other defenses to infringement that may otherwise be available to you +under applicable patent law. + +### 12. No Surrender of Others' Freedom + +If conditions are imposed on you (whether by court order, agreement or otherwise) +that contradict the conditions of this License, they do not excuse you from the +conditions of this License. If you cannot convey a covered work so as to satisfy +simultaneously your obligations under this License and any other pertinent +obligations, then as a consequence you may not convey it at all. For example, if you +agree to terms that obligate you to collect a royalty for further conveying from +those to whom you convey the Program, the only way you could satisfy both those terms +and this License would be to refrain entirely from conveying the Program. + +### 13. 
Use with the GNU Affero General Public License + +Notwithstanding any other provision of this License, you have permission to link or +combine any covered work with a work licensed under version 3 of the GNU Affero +General Public License into a single combined work, and to convey the resulting work. +The terms of this License will continue to apply to the part which is the covered +work, but the special requirements of the GNU Affero General Public License, section +13, concerning interaction through a network will apply to the combination as such. + +### 14. Revised Versions of this License + +The Free Software Foundation may publish revised and/or new versions of the GNU +General Public License from time to time. Such new versions will be similar in spirit +to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies that +a certain numbered version of the GNU General Public License “or any later +version” applies to it, you have the option of following the terms and +conditions either of that numbered version or of any later version published by the +Free Software Foundation. If the Program does not specify a version number of the GNU +General Public License, you may choose any version ever published by the Free +Software Foundation. + +If the Program specifies that a proxy can decide which future versions of the GNU +General Public License can be used, that proxy's public statement of acceptance of a +version permanently authorizes you to choose that version for the Program. + +Later license versions may give you additional or different permissions. However, no +additional obligations are imposed on any author or copyright holder as a result of +your choosing to follow a later version. + +### 15. Disclaimer of Warranty + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER +EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE +QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE +DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +### 16. Limitation of Liability + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY +COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS +PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, +INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE +OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE +WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + +### 17. Interpretation of Sections 15 and 16 + +If the disclaimer of warranty and limitation of liability provided above cannot be +given local legal effect according to their terms, reviewing courts shall apply local +law that most closely approximates an absolute waiver of all civil liability in +connection with the Program, unless a warranty or assumption of liability accompanies +a copy of the Program in return for a fee. 
+
+_END OF TERMS AND CONDITIONS_
+
+## How to Apply These Terms to Your New Programs
+
+If you develop a new program, and you want it to be of the greatest possible use to
+the public, the best way to achieve this is to make it free software which everyone
+can redistribute and change under these terms.
+
+To do so, attach the following notices to the program. It is safest to attach them
+to the start of each source file to most effectively state the exclusion of warranty;
+and each file should have at least the “copyright” line and a pointer to
+where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software: you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation, either version 3 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program does terminal interaction, make it output a short notice like this
+when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+    This program comes with ABSOLUTELY NO WARRANTY; for details type 'show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type 'show c' for details.
+
+The hypothetical commands `show w` and `show c` should show the appropriate parts of
+the General Public License. Of course, your program's commands might be different;
+for a GUI interface, you would use an “about box”.
+
+You should also get your employer (if you work as a programmer) or school, if any, to
+sign a “copyright disclaimer” for the program, if necessary. For more
+information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+The GNU General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may consider it
+more useful to permit linking proprietary applications with the library. If this is
+what you want to do, use the GNU Lesser General Public License instead of this
+License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
+","Markdown"
+"Nowcasting","covid-19-Re/estimateR","man/examples/make_tibble_from_output.R",".R","447","17","## Basic usage of make_tibble_from_output
+
+smoothed_incidence <- smooth_incidence(
+  incidence_data = HK_incidence_data$case_incidence,
+  smoothing_method = ""LOESS""
+)
+
+smoothed_incidence_tibble_1 <- make_tibble_from_output(smoothed_incidence)
+
+
+## Advanced usage of make_tibble_from_output
+
+smoothed_incidence_tibble_2 <- make_tibble_from_output(
+  output = smoothed_incidence,
+  output_name = ""incidence"",
+  ref_date = HK_incidence_data$date[1]
+)","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/convolve_delays.R",".R","2031","55","## Convolving the delay between infection and onset of symptoms with the delay
+# between onset of symptoms and case report to obtain the final delay between
+# infection and case report. 
Using the resulting delay distribution to recover
+# the original infection events
+
+smoothed_incidence <- smooth_incidence(HK_incidence_data$case_incidence)
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+total_delay_1 <- convolve_delays(list(delay_incubation, delay_onset_to_report))
+
+deconvolved_incidence <- deconvolve_incidence(
+  incidence_data = smoothed_incidence,
+  delay = total_delay_1
+)
+
+## Convolving multiple delays
+# In this example it is assumed that the delay between infection and case report
+# is composed of three delays: infection to symptom onset, symptom onset to the
+# test being taken, and the test being taken to the case report
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+delay_onset_to_test_taken <- list(name = ""norm"", mean = 5, sd = 2)
+
+delay_test_to_report <- list(name=""norm"", mean = 2, sd = 0.5)
+
+total_delay_2 <- convolve_delays(list(delay_incubation,
+                                      delay_onset_to_test_taken,
+                                      delay_test_to_report))
+
+## Convolving delays of multiple types
+# Defining the incubation period as a probability vector, and the delay between
+# symptom onset and case observation as a delay matrix
+
+delay_incubation <- c(0.01, 0.1, 0.15, 0.18, 0.17, 0.14, 0.11, 0.07, 0.035, 0.020, 0.015)
+
+delay_matrix <- get_matrix_from_empirical_delay_distr(
+  HK_delay_data,
+  n_report_time_steps = 50
+)
+
+total_delay_3 <- convolve_delays(list(delay_incubation, delay_matrix))
+
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/deconvolve_incidence.R",".R","2406","68","smoothed_onset_incidence <- smooth_incidence(HK_incidence_data$onset_incidence)
+smoothed_case_incidence <- smooth_incidence(HK_incidence_data$case_incidence)
+
+## Deconvolving symptom onset data.
+# In case the data to be deconvolved represents noisy observations of symptom
+# onset, only the delay distribution of the incubation time needs to be specified
+# (time that passes between case incidence and showing of symptoms).
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+deconvolved_incidence_1 <- deconvolve_incidence(
+  incidence_data = smoothed_onset_incidence,
+  delay = delay_incubation
+)
+
+
+## Deconvolving report incidence data.
+# In case the data to be deconvolved represents noisy observations of case reports,
+# both the delay distribution of the incubation time and the delay distribution of
+# the time between symptom onset and case report need to be specified. 
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+deconvolved_incidence_2 <- deconvolve_incidence(
+  incidence_data = smoothed_case_incidence,
+  delay = list(delay_incubation, delay_onset_to_report)
+)
+
+
+## Other available formats for specifying delay distributions
+
+# Discretized delay distribution vector
+mean_incubation = 5.2
+std_incubation = 1.6
+delay_distribution_incubation <- list(name=""norm"",
+                                      mean = mean_incubation,
+                                      sd = std_incubation)
+delay_incubation_vector <- build_delay_distribution(delay_distribution_incubation)
+
+deconvolved_incidence_3 <- deconvolve_incidence(
+  incidence_data = smoothed_onset_incidence,
+  delay = delay_incubation_vector
+)
+
+# Discretized delay distribution matrix
+delay_distribution_matrix <- get_matrix_from_empirical_delay_distr(
+  HK_delay_data,
+  n_report_time_steps = length(smoothed_case_incidence)
+)
+deconvolved_incidence_4 <- deconvolve_incidence(
+  incidence_data = smoothed_case_incidence,
+  delay = list(delay_incubation, delay_distribution_matrix)
+)
+
+# Dataframe containing empirical delay data
+deconvolved_incidence_5 <- deconvolve_incidence(
+  incidence_data = smoothed_case_incidence,
+  delay = list(delay_incubation, HK_delay_data)
+)
+
+
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/simulate_infections.R",".R","785","24","## Basic usage of simulate_infections
+# Simulating infection incidence corresponding to a drop in Re value from 2.3
+# to 0.5 halfway through the time period, then recovering the Re values using the
+# estimate_Re function
+
+Re_evolution <- c(rep(2.3, 100), rep(0.5, 100))
+simulated_incidence_1 <- simulate_infections(
+  Rt = Re_evolution
+)
+Re_recovered_1 <- estimate_Re(simulated_incidence_1)
+
+
+## Advanced usage of simulate_infections
+# Simulating infection incidence using the same Re progression as above, but
+# assuming a constant import of 100 cases per day
+
+imported_infections <- rep(100, length(Re_evolution))
+simulated_incidence_2 <- simulate_infections(
+  Rt = Re_evolution,
+  imported_infections = imported_infections
+)
+Re_recovered_2 <- estimate_Re(simulated_incidence_2)
+
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/get_matrix_from_empirical_delay_distr.R",".R","1482","41","## Basic usage of get_matrix_from_empirical_delay_distr
+# Obtaining the deconvolved incidence for the full HK incidence data provided in
+# the package: obtaining the delay matrix and then using it to recover the
+# deconvolved incidence. 
+smoothed_incidence <- smooth_incidence(HK_incidence_data$case_incidence)
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+delay_matrix_1 <- get_matrix_from_empirical_delay_distr(
+  HK_delay_data,
+  n_report_time_steps = length(smoothed_incidence)
+)
+
+deconvolved_incidence_1 <- deconvolve_incidence(
+  incidence_data = smoothed_incidence,
+  delay = list(delay_incubation, delay_matrix_1)
+)
+
+
+## Advanced usage of get_matrix_from_empirical_delay_distr
+# Obtaining the deconvolved incidence for a section of the HK incidence data
+# provided in the package: computing the delay matrix, fitting gamma distributions
+# to the columns, and then using the resulting matrix to recover the deconvolved
+# incidence for the time-frame of interest
+
+smoothed_partial_incidence <- smooth_incidence(HK_incidence_data[30:90,]$case_incidence)
+
+delay_matrix_2 <- get_matrix_from_empirical_delay_distr(
+  HK_delay_data,
+  n_report_time_steps = length(smoothed_partial_incidence),
+  fit = ""gamma"",
+  ref_date = HK_incidence_data[30,]$date
+)
+
+deconvolved_incidence_2 <- deconvolve_incidence(
+  incidence_data = smoothed_partial_incidence,
+  delay = list(delay_incubation, delay_matrix_2)
+)","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/estimate_Re_from_noisy_delayed_incidence.R",".R","2012","48","## Basic usage of estimate_Re_from_noisy_delayed_incidence
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+Re_estimate_1 <- estimate_Re_from_noisy_delayed_incidence(
+  incidence_data = HK_incidence_data$case_incidence,
+  delay = list(delay_incubation, delay_onset_to_report)
+)
+
+## Advanced usage of estimate_Re_from_noisy_delayed_incidence
+# Incorporating prior knowledge over Re. Here, Re is assumed constant over a time
+# frame of one week, with a prior mean of 1.25.
+Re_estimate_2 <- estimate_Re_from_noisy_delayed_incidence(
+  incidence_data = HK_incidence_data$case_incidence,
+  delay = list(delay_incubation, delay_onset_to_report),
+  estimation_method = ""EpiEstim piecewise constant"",
+  interval_length = 7,
+  mean_Re_prior = 1.25
+)
+
+# Incorporating prior knowledge over the disease. Here, the mean of the serial
+# interval is assumed to be 5 days, and the standard deviation is assumed to be
+# 2.5 days.
+Re_estimate_3 <- estimate_Re_from_noisy_delayed_incidence(
+  incidence_data = HK_incidence_data$case_incidence,
+  delay = list(delay_incubation, delay_onset_to_report),
+  mean_serial_interval = 5,
+  std_serial_interval = 2.5
+)
+
+# Incorporating prior knowledge over the epidemic. Here, it is assumed that Re
+# changes values 4 times during the epidemic, so the intervals over which Re is
+# assumed to be constant are passed as a parameter. 
+last_interval_index <- length(HK_incidence_data$case_incidence) +Re_estimate_4 <- estimate_Re_from_noisy_delayed_incidence( + incidence_data = HK_incidence_data$case_incidence, + delay = list(delay_incubation, delay_onset_to_report), + estimation_method = ""EpiEstim piecewise constant"", + interval_ends = c(50, 75, 100, 160, last_interval_index) +) +","R" +"Nowcasting","covid-19-Re/estimateR","man/examples/estimate_from_combined_observations.R",".R","2465","61","shape_incubation = 3.2 +scale_incubation = 1.3 +delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation) + +shape_onset_to_report = 2.7 +scale_onset_to_report = 1.6 +delay_onset_to_report <- list(name=""gamma"", + shape = shape_onset_to_report, + scale = scale_onset_to_report) + + +## Basic usage of estimate_from_combined_observations +Re_estimate_1 <- estimate_from_combined_observations( + partially_delayed_incidence = HK_incidence_data$onset_incidence, + fully_delayed_incidence = HK_incidence_data$report_incidence, + partial_observation_requires_full_observation = TRUE, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report +) + + +## Advanced usage of estimate_from_combined_observations + +# Getting a more verbose result. Adding a date column and returning intermediate +# results as well as the Re estimate. +Re_estimate_2 <- estimate_from_combined_observations( + partially_delayed_incidence = HK_incidence_data$onset_incidence, + fully_delayed_incidence = HK_incidence_data$report_incidence, + partial_observation_requires_full_observation = TRUE, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + ref_date = HK_incidence_data$date[1], + output_Re_only = FALSE +) + +# Incorporating prior knowledge over Re. Here, Re is assumed constant over a time +# frame of one week, with a prior mean of 1.25. +Re_estimate_3 <- estimate_from_combined_observations( + partially_delayed_incidence = HK_incidence_data$onset_incidence, + fully_delayed_incidence = HK_incidence_data$report_incidence, + partial_observation_requires_full_observation = TRUE, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + estimation_method = 'EpiEstim piecewise constant', + interval_length = 7, + mean_Re_prior = 1.25 +) + +# Incorporating prior knowledge over the disease. Here, the mean of the serial +# interval is assumed to be 5 days, and the standard deviation is assumed to be +# 2.5 days. +Re_estimate_4 <- estimate_from_combined_observations( + partially_delayed_incidence = HK_incidence_data$onset_incidence, + fully_delayed_incidence = HK_incidence_data$report_incidence, + partial_observation_requires_full_observation = TRUE, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + mean_serial_interval = 5, + std_serial_interval = 2.5 +) +","R" +"Nowcasting","covid-19-Re/estimateR","man/examples/simulate_combined_observations.R",".R","1313","35","## Basic use of simulate_combined_observations +# Simulating combined observations, assuming two gamma delays between infection +# and symptom onset, and symptom onset and case report respectively. It is assumed +# that 20% of the cases are observed as partially-delayed observations. 
+Re_evolution <- c(rep(2.3, 100))
+incidence <- simulate_infections(Re_evolution)
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+simulated_combined_observations_1 <- simulate_combined_observations(
+  incidence,
+  delay_until_partial = delay_incubation,
+  delay_until_final_report = delay_onset_to_report,
+  prob_partial_observation = 0.2
+)
+
+## Advanced use of simulate_combined_observations
+# Adding gaussian noise to the combined observations simulated above.
+simulated_combined_observations_2 <- simulate_combined_observations(
+  incidence,
+  delay_until_partial = delay_incubation,
+  delay_until_final_report = delay_onset_to_report,
+  prob_partial_observation = 0.2,
+  noise = list(type = 'gaussian', sd = 0.8)
+)
+
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/get_bootstrap_replicate.R",".R","468","18","## Basic usage of get_bootstrap_replicate
+
+bootstrap_replicate_1 <- get_bootstrap_replicate(
+  HK_incidence_data$case_incidence
+)
+
+
+## Advanced usage of get_bootstrap_replicate
+# Generate a bootstrap replicate of the incidence data, where case numbers are
+# allowed to be decimal numbers, and the output is returned as a list.
+
+bootstrap_replicate_2 <- get_bootstrap_replicate(
+  HK_incidence_data$case_incidence,
+  simplify_output = FALSE,
+  round_incidence = FALSE
+)
+
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/get_bootstrapped_estimates_from_combined_observations.R",".R","2338","59","## Basic usage of get_bootstrapped_estimates_from_combined_observations
+# (Only 10 bootstrap replicates are generated to keep the code fast. In practice,
+# use more.)
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+
+Re_estimate_1 <- get_bootstrapped_estimates_from_combined_observations(
+  partially_delayed_incidence = HK_incidence_data$onset_incidence,
+  fully_delayed_incidence = HK_incidence_data$report_incidence,
+  partial_observation_requires_full_observation = TRUE,
+  delay_until_partial = delay_incubation,
+  delay_until_final_report = delay_onset_to_report,
+  N_bootstrap_replicates = 10
+)
+
+
+## Advanced usage of get_bootstrapped_estimates_from_combined_observations
+# Incorporating prior knowledge over Re. Here, Re is assumed constant over a time
+# frame of one week, with a prior mean of 1.25.
+Re_estimate_2 <- get_bootstrapped_estimates_from_combined_observations(
+  partially_delayed_incidence = HK_incidence_data$onset_incidence,
+  fully_delayed_incidence = HK_incidence_data$report_incidence,
+  partial_observation_requires_full_observation = TRUE,
+  delay_until_partial = delay_incubation,
+  delay_until_final_report = delay_onset_to_report,
+  N_bootstrap_replicates = 10,
+  estimation_method = 'EpiEstim piecewise constant',
+  interval_length = 7,
+  mean_Re_prior = 1.25,
+  ref_date = HK_incidence_data$date[1]
+)
+
+
+# Incorporating prior knowledge over the disease. Here, we assume the mean of the
+# serial interval to be 5 days, and the standard deviation is assumed to be 2.5 days. 
The
+# delay between symptom onset and case confirmation is passed as empirical data.
+Re_estimate_3 <- get_bootstrapped_estimates_from_combined_observations(
+  partially_delayed_incidence = HK_incidence_data$onset_incidence,
+  fully_delayed_incidence = HK_incidence_data$report_incidence,
+  partial_observation_requires_full_observation = TRUE,
+  delay_until_partial = delay_incubation,
+  delay_until_final_report = delay_onset_to_report,
+  N_bootstrap_replicates = 10,
+  mean_serial_interval = 5,
+  std_serial_interval = 2.5
+)
+
+
+
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/get_block_bootstrapped_estimate.R",".R","1859","54","## Basic usage of get_block_bootstrapped_estimate
+# (Only 10 bootstrap replicates are generated to keep the code fast. In practice,
+# use more.)
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+
+Re_estimate_1 <- get_block_bootstrapped_estimate(
+  HK_incidence_data$case_incidence,
+  N_bootstrap_replicates = 10,
+  delay = list(delay_incubation, delay_onset_to_report)
+)
+
+
+## Advanced usage of get_block_bootstrapped_estimate
+# (Only 10 bootstrap replicates are generated to keep the code fast. In practice,
+# use more.)
+
+
+# Incorporating prior knowledge over Re. Here, Re is assumed constant over a time
+# frame of one week, with a prior mean of 1.25.
+
+Re_estimate_2 <- get_block_bootstrapped_estimate(
+  HK_incidence_data$case_incidence,
+  N_bootstrap_replicates = 10,
+  delay = list(delay_incubation, HK_delay_data),
+  ref_date = HK_incidence_data$date[1],
+  estimation_method = 'EpiEstim piecewise constant',
+  interval_length = 7,
+  uncertainty_summary_method = 'bagged mean - CI from bootstrap estimates',
+  mean_Re_prior = 1.25
+)
+
+# Incorporating prior knowledge over the disease. Here, we assume the mean of the
+# serial interval to be 5 days, and the standard deviation is assumed to be 2.5 days. The
+# delay between symptom onset and case confirmation is passed as empirical data.
+
+Re_estimate_3 <- get_block_bootstrapped_estimate(
+  HK_incidence_data$case_incidence,
+  N_bootstrap_replicates = 10,
+  delay = list(delay_incubation, HK_delay_data),
+  ref_date = HK_incidence_data$date[1],
+  mean_serial_interval = 5,
+  std_serial_interval = 2.5
+)
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/simulate_delayed_observations.R",".R","1314","40","## Basic usage of simulate_delayed_observations
+# Simulating a series of delayed observations of infections generated by an
+# epidemic with an Re of 1.5. The delays of the observations follow a normal
+# distribution. 
+set.seed(7)
+infections <- simulate_infections(rep(1.5, 100))
+
+
+delay <- list(name=""norm"", mean = 7, sd = 2)
+
+delayed_observations_1 <- simulate_delayed_observations(
+  infections,
+  delay = delay
+)
+
+## Advanced usage of simulate_delayed_observations
+# Simulating delayed observations using the same infections as above, but assuming
+# the observation is delayed by a convolution of two different delays
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+delayed_observations_2 <- simulate_delayed_observations(
+  infections,
+  delay = list(delay_incubation, delay_onset_to_report)
+)
+
+# Simulating noisy delayed observations, assuming Gaussian noise
+delayed_observations_3 <- simulate_delayed_observations(
+  infections,
+  delay = delay,
+  noise = list(type = 'gaussian', sd = 0.8)
+)","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/get_infections_from_incidence.R",".R","1155","33","## Basic usage of get_infections_from_incidence
+# Recovering infection events from case incidence data assuming distinct gamma
+# distributions for the delay between infection and symptom onset, and the delay
+# between symptom onset and case reporting.
+
+shape_incubation = 3.2
+scale_incubation = 1.3
+delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation)
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+infections_1 <- get_infections_from_incidence(
+  HK_incidence_data$case_incidence,
+  delay = list(delay_incubation, delay_onset_to_report)
+)
+
+
+## Advanced usage of get_infections_from_incidence
+# Recovering infection events from symptom onset data, assuming the same delay
+# distributions as above
+
+infections_2 <- get_infections_from_incidence(
+  HK_incidence_data$onset_incidence,
+  delay = delay_incubation,
+  is_partially_reported_data = TRUE,
+  delay_until_final_report = delay_onset_to_report,
+  ref_date = HK_incidence_data$date[1]
+)
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/nowcast.R",".R","900","27","## Basic usage of nowcast
+
+shape_onset_to_report = 2.7
+scale_onset_to_report = 1.6
+delay_onset_to_report <- list(name=""gamma"",
+                              shape = shape_onset_to_report,
+                              scale = scale_onset_to_report)
+
+corrected_incidence_data_1 <- nowcast(
+  incidence_data = HK_incidence_data$onset_incidence,
+  delay_until_final_report = delay_onset_to_report
+)
+
+
+## Advanced usage of nowcast
+# Only taking into account cases that have a probability greater than 25% of
+# being observed. Here, the delay between symptom onset and report is given as
+# empirical delay data, so the date of the first entry in incidence_data must
+# be specified.
+
+corrected_incidence_data_2 <- nowcast(
+  incidence_data = HK_incidence_data$onset_incidence,
+  delay_until_final_report = HK_delay_data,
+  ref_date = HK_incidence_data$date[1],
+  cutoff_observation_probability = 0.25
+)
+","R"
+"Nowcasting","covid-19-Re/estimateR","man/examples/estimate_Re.R",".R","2239","64","## Building incidence_data
+# estimate_Re assumes incidence_data represents infections, not delayed noisy
+# observations of infections. 
Thus, we need to first smooth the incidence data +# and then perform a deconvolution step. For more details, see the smooth_incidence +# and deconvolve_incidence functions. + +shape_incubation <- 3.2 +scale_incubation <- 1.3 +delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + +shape_onset_to_report = 2.7 +scale_onset_to_report = 1.6 +delay_onset_to_report <- list(name=""gamma"", + shape = shape_onset_to_report, + scale = scale_onset_to_report) + +smoothed_incidence <- smooth_incidence(HK_incidence_data$case_incidence) +deconvolved_incidence <- deconvolve_incidence( + smoothed_incidence, + delay = list(delay_incubation, delay_onset_to_report) +) + + +## Basic usage of estimate_Re +Re_estimate_1 <- estimate_Re(incidence_data = deconvolved_incidence) + + +## Advanced usage of estimate_Re +# Incorporating prior knowledge over Re. Here, Re is assumed constant over a time +# frame of one week, with a prior mean of 1.25. +Re_estimate_2 <- estimate_Re( + incidence_data = deconvolved_incidence, + estimation_method = 'EpiEstim piecewise constant', + interval_length = 7, + mean_Re_prior = 1.25 +) + +# Incorporating prior knowledge over the disease. Here, the mean of the serial +# interval is assumed to be 5 days, and the standard deviation is assumed to be +# 2.5 days. +Re_estimate_3 <- estimate_Re( + incidence_data = deconvolved_incidence, + mean_serial_interval = 5, + std_serial_interval = 2.5 +) + +# Incorporating prior knowledge over the epidemic. Here, it is assumed that Re +# changes values 4 times during the epidemic, so the intervals over which Re is +# assumed to be constant are passed as a parameter. +last_interval_index <- length(deconvolved_incidence$values) + + deconvolved_incidence$index_offset + +Re_estimate_4 <- estimate_Re( + incidence_data = deconvolved_incidence, + estimation_method = ""EpiEstim piecewise constant"", + interval_ends = c(50, 75, 100, 160, last_interval_index) +) + +# Recovering the Re HPD as well. +Re_estimate_5 <- estimate_Re( + incidence_data = deconvolved_incidence, + output_HPD = TRUE +) +","R" +"Nowcasting","covid-19-Re/estimateR","man/examples/merge_outputs.R",".R","775","28","shape_incubation = 3.2 +scale_incubation = 1.3 +delay_incubation <- list(name=""gamma"", shape = shape_incubation, scale = scale_incubation) + +smoothed_incidence <- smooth_incidence(HK_incidence_data$onset_incidence) +deconvolved_incidence <- deconvolve_incidence( + smoothed_incidence, + delay = delay_incubation +) + +## Basic usage of merge_outputs + +merged_incidence_1 <- merge_outputs( + list(""smoothed symptom onset"" = smoothed_incidence, + ""deconvolved symptom onset"" = deconvolved_incidence) +) + + +## Advanced usage of merge_outputs + +merged_incidence_2 <- merge_outputs( + list(""smoothed symptom onset"" = smoothed_incidence, + ""deconvolved symptom onset"" = deconvolved_incidence), + ref_date = HK_incidence_data$date[1], + include_index = TRUE, + index_col = ""index"" +) +","R" +"Nowcasting","covid-19-Re/estimateR","man/examples/smooth_incidence.R",".R","614","23","## Basic usage of smooth_incidence + +smoothed_incidence_1 <- smooth_incidence( + incidence_data = HK_incidence_data$case_incidence, + smoothing_method = ""LOESS"" +) + + +## Advanced usage of smooth_incidence +# Smoothing the incidence using a LOESS window of 15 days, fitting polynomials +# of degree 2 in the LOESS algorithm, and averaging the initial Re estimate over +# the first 7 days. 
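# (Note: the LOESS span is computed internally as data_points_incl divided by
# the number of time steps in the input; see the .smooth_LOESS helper.)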

smoothed_incidence_2 <- smooth_incidence(
  incidence_data = HK_incidence_data$case_incidence,
  smoothing_method = ""LOESS"",
  simplify_output = FALSE,
  data_points_incl = 15,
  degree = 2,
  initial_Re_estimate_window = 7
)

","R"
"Nowcasting","covid-19-Re/estimateR","man/examples/build_delay_distribution.R",".R","504","11","## Obtaining the discretized probability vector for different distributions

gamma_distribution <- list(name= ""gamma"", shape = 3.2, scale = 1.3)
delay_distribution_vector_1 <- build_delay_distribution(gamma_distribution)

normal_distribution <- list(name = ""norm"", mean = 5, sd = 2)
delay_distribution_vector_2 <- build_delay_distribution(normal_distribution)

uniform_distribution <- list(name = ""unif"", min = 0.5, max = 4)
delay_distribution_vector_3 <- build_delay_distribution(uniform_distribution)
","R"
"Nowcasting","covid-19-Re/estimateR","R/utils-validation.R",".R","28697","701","# List of predefined accepted string inputs for exported functions, for parameters
# whose validity is tested using the .is_value_in_accepted_values_vector() function
accepted_parameter_value <- list(
  smoothing_method = c(""LOESS"", ""none""),
  deconvolution_method = c(""Richardson-Lucy delay distribution"", ""none""),
  estimation_method = c(""EpiEstim sliding window"", ""EpiEstim piecewise constant""),
  bootstrapping_method = c(""non-parametric block boostrap"", ""none""),
  function_prefix = c(""d"", ""q"", ""p"", ""r""),
  uncertainty_summary_method = c(""original estimate - CI from bootstrap estimates"", ""bagged mean - CI from bootstrap estimates""),
  fit = c(""none"", ""gamma"")
)

#' Check that an object represents a probability distribution.
#'
#' To pass the check:
#' 1) the object must be a numeric vector;
#' 2) its elements must sum to 1;
#' 3) it must not contain any strictly negative values;
#' 4) (optionally) it must not contain NAs.
#'
#' @param distribution Input for which we need to check that it is a proper probability distribution.
#' @param tolerate_NAs Can the distribution contain NA values?
#' @param tolerance_on_sum Numeric tolerance in checking that vector elements sum to 1.
#'
#' @inherit validation_utility_params
.check_is_probability_distr_vector <- function(distribution, tolerate_NAs = FALSE, tolerance_on_sum = 1E-2, parameter_name = deparse(substitute(distribution))) {
  .check_class_parameter_name(distribution, ""vector"", parameter_name, mode = ""numeric"")
  if (!tolerate_NAs && any(is.na(distribution))) {
    stop(""Not a proper delay distribution vector. Contains one or more NAs."")
  }

  if (!isTRUE(all.equal(1, sum(distribution, na.rm = TRUE), tolerance = tolerance_on_sum))) {
    stop(""Not a proper delay distribution vector. Does not sum to 1."")
  }

  if (any(distribution < 0, na.rm = TRUE)) {
    stop(""Not a proper delay distribution vector. Contains negative values."")
  }

  return(TRUE)
}


#' Check whether the class of an object is as expected
#'
#' @param object An object whose class needs checking.
#' @param proper_class A string describing the desired class of \code{object}.
#' @param mode Optional. A string describing the desired mode of \code{object}.
#' Use only if \code{proper_class} is \code{vector}. Mode cannot be \code{Date}.
#' Use \code{proper_class = ""Date""} for checking the class of a \code{Date} vector.
#'
#' @return TRUE if no error is thrown.
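#'
#' For illustration (hypothetical inputs):
#' \code{.check_class(c(1, 2), ""vector"", mode = ""numeric"")} returns \code{TRUE},
#' while \code{.check_class(""a"", ""Date"")} throws an error.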
+.check_class <- function(object, proper_class, mode = ""any"") { + if (""character"" %!in% class(proper_class) || length(proper_class) > 1) { + stop(""'proper_class' must be a single string."") + } + + if (""character"" %!in% class(mode) || length(mode) > 1) { + stop(""'mode' must be a single string."") + } + + if (proper_class == ""vector"") { + if (mode == ""Date"") { + stop(""Mode cannot be 'Date'."") + } + + if (!is.vector(object, mode = mode)) { + stop(paste0(deparse(substitute(object)), "" must be a "", mode, "" vector."")) + } + + return(TRUE) + } + + # validation function + is_proper_class <- get(paste0(""is."", proper_class), envir = loadNamespace(""lubridate"")) # need lubridate in case proper_class is Date + + if (!is_proper_class(object)) { + # deparse(substitute(...)) lets you do basically the reverse of get(..) + stop(paste0(deparse(substitute(object)), "" must be a "", proper_class, ""."")) + } + + return(TRUE) +} + +# TODO add checks for other distributions (lognormal, uniform, weibull, truncated_normal,...) +# TODO reconsider if can return FALSE +#' Check if valid distribution list +#' +#' +#' +#' @inheritParams distribution +#' @inheritParams validation_utility_params +#' +#' @return boolean. Returns FALSE if parameter values return an improper distribution (if gamma distr). Throws an error if not a list, or not a list with the appropriate elements. Returns TRUE otherwise. +.is_valid_distribution <- function(distribution, parameter_name = deparse(substitute(distribution))) { + .check_class_parameter_name(distribution, ""list"", parameter_name) + + if (!""name"" %in% names(distribution)) { + stop(""Missing distribution name. Include a 'name' element in distribution."") + } + + distribution_name <- distribution[[""name""]] + + density_function_name <- paste0(""d"", distribution_name) + density_function <- try(get(density_function_name, envir = loadNamespace(""stats"")), + silent = TRUE + ) + + if (class(density_function) == ""try-error"") { + stop(paste(""The "", density_function_name, "" function must be defined in the 'stats' package."")) + } + + distribution_parms <- .get_distribution_parms(distribution, density_function) + + if (length(distribution_parms) == 0) { + stop(""Missing distribution parameters."") + } + + # Check if parameter values are pathological. + if (distribution_name == ""gamma"") { + if (distribution_parms[[""shape""]] < 0) { + return(FALSE) + } else if (""scale"" %in% names(distribution_parms) && distribution_parms[[""scale""]] <= 0) { + return(FALSE) + } else { + return(TRUE) + } + } + + return(TRUE) +} + +#' Check if input is in the proper empirical delay data format +#' +#' If the \code{delay} input is not a dataframe, return \code{FALSE}. +#' Otherwise, an error is thrown if \code{delay} does not follow the expected format. +#' +#' @inherit empirical_delay_data_format details +#' @inherit validation_utility_params +#' @param delay object to be tested +#' +#' @return boolean. \code{TRUE} if the input is a dataframe in the proper format. 
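#'
#' A conforming input could look like (hypothetical data):
#' \code{data.frame(event_date = as.Date(c(""2020-03-01"", ""2020-03-02"")),
#' report_delay = c(3, 5))}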
.check_is_empirical_delay_data <- function(delay, parameter_name = deparse(substitute(delay))) {
  if (is.data.frame(delay)) {
    if (""event_date"" %!in% colnames(delay)) {
      stop(""Missing 'event_date' column in dataframe."")
    }
    .check_class_parameter_name(delay$event_date, ""Date"", parameter_name)

    if (""report_delay"" %!in% colnames(delay)) {
      stop(""Missing 'report_delay' column in dataframe."")
    }
    .check_class_parameter_name(delay$report_delay, ""vector"", parameter_name, mode = ""numeric"")

    if (any(is.na(delay$event_date)) || any(is.na(delay$report_delay))) {
      stop(""Empirical delay data contains NA values."")
    }

    if (any(delay$report_delay < 0)) {
      stop(""'report_delay' column contains negative values."")
    }

    return(TRUE)
  } else {
    return(FALSE)
  }
}


#' Check if object is numeric vector.
#'
#' @param object Any object.
#'
#' @return TRUE if numeric vector, FALSE otherwise
.is_numeric_vector <- function(object) {
  return(is.vector(object, mode = ""numeric""))
}


#' @description Utility function that checks if a specific user-given parameter value is among the accepted ones, in which case it returns TRUE.
#' Throws an error otherwise.
#'
#' @inherit validation_utility_params
.is_value_in_accepted_values_vector <- function(string_user_input, parameter_name) {
  if (!is.character(string_user_input)) {
    stop(paste(""Expected parameter"", parameter_name, ""to be a string.""))
  }
  if (!(string_user_input %in% accepted_parameter_value[[parameter_name]])) {
    stop(paste(""Expected parameter"", parameter_name, ""to have one of the following values:"", toString(accepted_parameter_value[[parameter_name]]), "". Given input was:"", string_user_input))
  }
  return(TRUE)
}

#' @description Utility function that checks if a specific user-given parameter value is an accepted time_step, in which case it returns TRUE.
#' An accepted time_step is considered to be:
#' <>
#' (from \url{https://www.rdocumentation.org/packages/base/versions/3.6.2/topics/seq.Date})
#' @inherit validation_utility_params
#'
.is_value_valid_time_step <- function(string_user_input, parameter_name) {
  if (!is.character(string_user_input)) {
    stop(paste(""Expected parameter"", parameter_name, ""to be a string.""))
  }
  is_valid_time_step <- grepl(""^([+]?\\d+ )?(day|week|month|quarter|year)s?$"", string_user_input)
  if (!is_valid_time_step) {
    stop(paste(""Expected parameter"", parameter_name, ""to be a character string, containing one of \""day\"", \""week\"", \""month\"", \""quarter\"" or \""year\"". This can optionally be preceded by a positive integer and a space, or followed by \""s\"".""))
  }
  return(TRUE)
}


#' Utility functions for input validity.
#' @description Utility function to determine whether an object is a numeric vector with all positive (or zero) values.
#'
#' @param vector vector to be tested
#'
#' @return boolean. TRUE if vector is a positive numeric vector, FALSE otherwise.
.is_positive_numeric_vector <- function(vector) {
  if (!is.vector(vector, mode = ""numeric"")) {
    return(FALSE)
  }
  if (any(is.na(vector))) {
    return(FALSE)
  }
  if (!all(vector >= 0)) {
    return(FALSE)
  }
  return(TRUE)
}


#' @description Utility function that checks if a user input is one of:
#' \itemize{
#' \item a numeric vector with values >= 0
#' \item a list with two elements: \code{values} (a numeric vector with values >= 0) and \code{index_offset} (an integer)
#' }
#' @inherit validation_utility_params
#' @param module_input_object the vector/list the user passed as a parameter, to be tested
#'
.is_valid_module_input <- function(module_input_object, parameter_name) {
  if (is.list(module_input_object)) {
    if (""values"" %!in% names(module_input_object)) {
      stop(paste(""When passed as a list,"", parameter_name, ""has to contain a $values element.""))
    }

    if (""index_offset"" %!in% names(module_input_object)) {
      stop(paste(""When passed as a list,"", parameter_name, ""has to contain an $index_offset element.""))
    }

    if (!.is_positive_numeric_vector(module_input_object$values)) {
      stop(paste(""The $values element of"", parameter_name, ""has to be a numeric vector with values greater or equal to 0.""))
    }

    if (module_input_object$index_offset != as.integer(module_input_object$index_offset)) { # if index_offset is not an integer
      stop(paste(""The $index_offset element of"", parameter_name, ""has to be an integer.""))
    }
  } else if (is.numeric(module_input_object)) {
    if (!.is_positive_numeric_vector(module_input_object)) {
      stop(paste(parameter_name, ""has to be a numeric vector with values greater or equal to 0.""))
    }
  } else {
    stop(paste(parameter_name, ""has to be either a numeric vector or a list.""))
  }
  return(TRUE)
}

.is_list_of_outputs <- function(output_list) {
  if (!is.list(output_list)) {
    return(FALSE)
  }

  check_if_simple_output <- try(.is_valid_module_input(output_list, deparse(substitute(output_list))),
    silent = TRUE
  )

  # Return FALSE if input is an output object itself
  if (!(""try-error"" %in% class(check_if_simple_output))) {
    return(FALSE)
  }

  for (i in 1:length(output_list)) {
    test_output_i <- try(.is_valid_module_input(output_list[[i]], names(output_list)[i]),
      silent = TRUE
    )
    if (""try-error"" %in% class(test_output_i)) {
      return(FALSE)
    }
  }

  return(TRUE)
}



# TODO reconsider whether we need the incidence_data_length here.
# Is it acceptable if dim(matrix) > incidence data length?
# And is it needed to check whether ncol(delay_matrix) < incidence_data_length
#' @description Utility function that checks if a given matrix is a valid delay distribution matrix.
#' For this, the matrix needs to fulfill the following conditions:
#' \itemize{
#' \item is a numeric matrix
#' \item has no values < 0
#' \item is a lower triangular matrix
#' \item no column sums up to more than 1
#' \item no NA values
#' \item the size of the matrix is at least the length of the incidence data
#' }
#'
#' @inherit validation_utility_params
#' @param delay_matrix A matrix to be tested
#'
.check_is_delay_distribution_matrix <- function(delay_matrix, incidence_data_length, parameter_name) {
  if (!is.matrix(delay_matrix) || !is.numeric(delay_matrix)) {
    stop(paste(parameter_name, ""needs to be a numeric matrix.""))
  }

  if (any(is.na(delay_matrix))) {
    stop(paste(parameter_name, ""cannot contain any NA values.""))
  }

  if (!all(delay_matrix >= 0)) {
    stop(paste(parameter_name, ""needs to contain non-negative values.""))
  }

  if (ncol(delay_matrix) != nrow(delay_matrix)) {
    stop(paste(parameter_name, ""needs to be a square matrix.""))
  }

  if (!all(delay_matrix == delay_matrix * lower.tri(delay_matrix, diag = TRUE))) { # check if matrix is lower triangular
    stop(paste(parameter_name, ""needs to be a lower triangular matrix.""))
  }

  if (!all(colSums(delay_matrix) <= 1)) {
    stop(paste(parameter_name, ""is not a valid delay distribution matrix. At least one column sums up to a value greater than 1.""))
  }

  if (ncol(delay_matrix) < incidence_data_length) {
    stop(paste(parameter_name, ""needs to have a size greater than or equal to the length of the incidence data.""))
  }

  return(TRUE)
}

#' @description Utility function that checks whether a user input is a valid delay object. This means it can be one of the following:
#' \itemize{
#' \item a probability distribution vector: a numeric vector with no \code{NA} or negative values, whose entries sum up to 1
#' \item empirical delay data: a data frame with two columns: \code{event_date} and \code{report_delay}. The columns cannot contain \code{NA} values. \code{report_delay} only contains non-negative values
#' \item a delay distribution matrix (as described in \code{\link{.check_is_delay_distribution_matrix}})
#' \item a distribution object (e.g. list(name = 'gamma', scale = X, shape = Y))
#' }
#' @inherit validation_utility_params
#' @param delay_object user inputted object to be tested
#'
.is_valid_delay_object <- function(delay_object, parameter_name, incidence_data_length) {
  if (.is_numeric_vector(delay_object)) {
    .check_is_probability_distr_vector(delay_object, parameter_name = parameter_name)
  } else if (is.data.frame(delay_object)) {
    .check_is_empirical_delay_data(delay_object, parameter_name)
  } else if (is.matrix(delay_object)) {
    .check_is_delay_distribution_matrix(delay_object, incidence_data_length, parameter_name)
  } else if (is.list(delay_object)) {
    .is_valid_distribution(delay_object, parameter_name)
  } else {
    stop(paste(""Invalid"", parameter_name, ""input."", parameter_name, ""must be either:
           a numeric vector representing a discretized probability distribution,
           or a matrix representing discretized probability distributions,
           or a distribution object (e.g. list(name = 'gamma', scale = X, shape = Y)),
           or empirical delay data.""))
  }
  return(TRUE)
}

#' @description Utility function that checks whether a user input is a list of valid delay objects, or is itself a single valid delay object.
+#' This means the user input can be a list in which each element can be one of the following: +#' \itemize{ +#' \item a probability distribution vector: a numeric vector with no \code{NA} or negative values, whose entries sum up to 1 +#' \item an empirical delay data: a data frame with two columns: \code{event_date} and \code{report_delay}. The columns cannot contain \code{NA} values. \code{report_delay} only contains non-negative values +#' \item a delay distribution matrix (as described in \code{\link{.check_is_delay_distribution_matrix}}) +#' \item a distribution object (e.g. list(name = 'gamma', scale = X, shape = Y)) +#' } +#' +#' Or the user input can itself be one of these types. +#' @inherit validation_utility_params +#' @param delay_list user inputted object to be tested +#' +.is_valid_delay_single_or_list <- function(delay_list, parameter_name, incidence_data_length) { + if (is.list(delay_list) && !is.data.frame(delay_list)) { + is_distribution <- try(.is_valid_distribution(delay_list, parameter_name), silent = TRUE) + if (""try-error"" %in% class(is_distribution)) { + is_delay_list <- try(lapply(delay_list, function(delay) { + .is_valid_delay_object(delay, parameter_name, incidence_data_length) + }), silent = TRUE) + + if (""try-error"" %in% class(is_delay_list)) { + stop(paste( + ""Invalid"", parameter_name, + ""Either one of the delay objects is invalid or"", parameter_name, + ""is an invalid distribution object."" + )) + } + } + } else { + .is_valid_delay_object(delay_list, parameter_name, incidence_data_length) + } + return(TRUE) +} + +#' @description Utility function that checks whether a user input is a valid computation-ready delay object. +#' This means it can be one of the following: +#' \itemize{ +#' \item a probability distribution vector: a numeric vector with no \code{NA} or negative values, whose entries sum up to 1 +#' \item a delay distribution matrix (as described in \code{\link{.check_is_delay_distribution_matrix}}) +#' } +#' @inherit validation_utility_params +#' @param delay_object user input object to be tested +#' +.is_valid_computation_ready_delay_object <- function(delay_object, parameter_name, incidence_data_length) { + if (.is_numeric_vector(delay_object)) { + .check_is_probability_distr_vector(delay_object, parameter_name = parameter_name) + } else if (is.matrix(delay_object)) { + .check_is_delay_distribution_matrix(delay_object, incidence_data_length, parameter_name) + } else { + stop(paste(""Invalid"", parameter_name, ""input."", parameter_name, ""must be either: + a numeric vector representing a discretized probability distribution, + or a matrix representing discretized probability distributions."")) + } + return(TRUE) +} + +#' @description Utility function to check whether an object belongs to a particular class. +#' Wrapper function over \code{\link{.check_class}} needed because, being called from \code{\link{.are_valid_argument_values}}, +#' the parameter name will not be the same as the one from the original function. +#' +#' @inherit validation_utility_params +#' @inherit .check_class +#' +.check_class_parameter_name <- function(object, proper_class, parameter_name, mode = ""any"") { + tryCatch( + { + if (length(object) == 1 && is.na(object)) { + stop(""Object was NA"") # This error message is never shown. Overwritten below. 
      }
      .check_class(object, proper_class, mode)
    },
    error = function(error) {
      stop(paste(""Expected parameter"", parameter_name, ""to be of type"", proper_class, ""and not NA.""))
    }
  )
  return(TRUE)
}

#' @description Utility function to check whether an object is null or belongs to a particular class.
#'
#' @inherit validation_utility_params
#' @inherit .check_class
#'
.check_if_null_or_belongs_to_class <- function(object, proper_class, parameter_name, mode = ""any"") {
  if (!is.null(object)) {
    .check_class_parameter_name(object, proper_class, parameter_name, mode)
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a number.
#'
#' @inherit validation_utility_params
#'
.check_if_number <- function(number, parameter_name) {
  if (!is.numeric(number)) {
    stop(paste(parameter_name, ""is expected to be a number.""))
  }
  if (length(number) > 1) {
    stop(paste(parameter_name, ""is expected to be a number.""))
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a positive number or 0.
#'
#' @inherit validation_utility_params
#'
.check_if_non_negative_number <- function(number, parameter_name) {
  .check_if_number(number, parameter_name)

  if (number < 0) {
    stop(paste(parameter_name, ""is expected to be non-negative.""))
  }

  return(TRUE)
}

#' @description Utility function to check whether an object is a strictly positive number
#'
#' @inherit validation_utility_params
#'
.check_if_positive_number <- function(number, parameter_name) {
  .check_if_number(number, parameter_name)

  if (number <= 0) {
    stop(paste(parameter_name, ""is expected to be strictly positive.""))
  }

  return(TRUE)
}

#' @description Utility function to check whether an object is an integer
#'
#' @inherit validation_utility_params
#'
.check_if_integer_value <- function(number, parameter_name) {
  if (length(number) != 1 || round(number) != number) { # did not use .check_class_parameter_name since is.integer(1) returns FALSE
    stop(paste(parameter_name, ""needs to be an integer value.""))
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a vector of integers
#'
#' @inherit validation_utility_params
#' @param vector The value to be tested
#'
.check_if_integer_vector <- function(vector, parameter_name) {
  if (!isTRUE(all.equal(round(vector), vector))) { # did not use .check_class_parameter_name since is.integer(1) returns FALSE
    stop(paste(parameter_name, ""needs to be an integer vector.""))
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a vector of non-negative integers
#'
#' @inherit validation_utility_params
#' @param vector The value to be tested
#'
.check_if_non_neg_integer_vector <- function(vector, parameter_name) {

  .check_if_integer_vector(vector, parameter_name)
  if (!all(vector >= 0)) {
    stop(paste(parameter_name, ""needs to only contain non-negative values.""))
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is an integer or null
#'
#' @inherit validation_utility_params
#'
.check_if_null_or_integer <- function(number, parameter_name) {
  if (!is.null(number)) {
    .check_if_integer_value(number, parameter_name)
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a strictly positive integer
#'
#' @inherit validation_utility_params
#'
.check_if_positive_integer <- function(number, parameter_name) {
  .check_if_positive_number(number, parameter_name)
  .check_if_integer_value(number, parameter_name)
}


#' @description Utility function to check whether an object is a valid noise list
#'
#' @inherit validation_utility_params
#'
.check_if_noise <- function(noise, parameter_name) {
  if (!is.list(noise)) {
    stop(paste0(""Expected "", parameter_name, "" to be a list.""))
  }

  if (""type"" %!in% names(noise)) {
    stop(paste0(""Expected "", parameter_name, "" to be a list with a 'type' element.""))
  }

  if (noise$type != ""noiseless"" && ""sd"" %!in% names(noise)) {
    stop(paste0(""Unless the noise type is 'noiseless', expected "", parameter_name, "" to contain a 'sd' element.""))
  }
  return(TRUE)
}

#' @description Utility function to check whether an object is a number that belongs to a given interval
#'
#' @inherit validation_utility_params
#' @param interval_start Left bound of the accepted interval
#' @param interval_end Right bound of the accepted interval
#'
.check_is_numeric_in_interval <- function(user_input, parameter_name, interval_start, interval_end) {
  .check_if_number(user_input, parameter_name)
  if (user_input < interval_start || user_input > interval_end) {
    stop(paste0(""Expected "", parameter_name, "" to be in interval ["", interval_start, "", "", interval_end, ""].""))
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a valid estimates object.
#' It must be a dataframe and have an index column named \code{index_col_name} that doesn't contain any \code{NA} values.
#' @inherit validation_utility_params
#' @param index_col_name string. Name of the index column in the \code{user_input} dataframe.
#'
.check_is_estimate <- function(user_input, parameter_name, index_col_name) {
  .check_class_parameter_name(user_input, ""data.frame"", parameter_name)

  if (index_col_name %!in% names(user_input)) {
    stop(paste(""Missing index column. No column named"", index_col_name, ""in"", parameter_name))
  }

  if (any(is.na(user_input[[index_col_name]]))) {
    stop(paste(""NA value(s) in column"", index_col_name, ""in"", parameter_name))
  }
  return(TRUE)
}


#' @description Utility function to check whether an object is a valid bootstrap estimates object.
#' It has to be a valid estimates object and to have the columns specified by \code{col_names}.
#'
#' @inherit validation_utility_params
#' @param col_names vector. Contains the column names of \code{index_col}, \code{bootstrap_id_col} and \code{Re_estimate_col}, as described by the \code{summarise_uncertainty} function.
#'
.check_is_bootstrap_estimate <- function(user_input, parameter_name, col_names) {
  Re_estimate_col <- col_names[1]
  bootstrap_id_col <- col_names[2]
  index_col <- col_names[3]

  .check_is_estimate(user_input, parameter_name, index_col)

  for (i in 1:2) { # the bootstrap_id column name and Re_estimate column name; the index column is already checked by .check_is_estimate
    if (col_names[i] %!in% names(user_input)) {
      stop(paste0(""Missing "", col_names[i], "" column in 'bootstrapped estimates' argument,
                  or '"", col_names[i], ""' was not set to the corresponding column name.""))
    }
  }

  return(TRUE)
}

#' Utility functions for input validity.
#'
#' @description Utility function that checks that the values the user passed when calling a function are valid.
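#'
#' Each element of \code{user_inputs} is a list of the form
#' \code{list(value, ""input_type"")}, with an optional third element for checks
#' that need extra context, e.g. (illustrative)
#' \code{.are_valid_argument_values(list(list(0.5, ""numeric_between_zero_one"")))}.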
+#' +#' @inherit validation_utility_params +#' +.are_valid_argument_values <- function(user_inputs) { + for (i in 1:length(user_inputs)) { + user_input <- user_inputs[[i]][[1]] + input_type <- user_inputs[[i]][[2]] + parameter_name <- deparse(substitute(user_inputs)[[i + 1]][[2]]) + if (length(user_inputs[[i]]) > 2) { + additional_function_parameter <- user_inputs[[i]][[3]] + } + + switch(input_type, + ""smoothing_method"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""deconvolution_method"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""estimation_method"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""uncertainty_summary_method"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""bootstrapping_method"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""time_step"" = .is_value_valid_time_step(user_input, parameter_name), + ""module_input"" = .is_valid_module_input(user_input, parameter_name), + ""boolean"" = .check_class_parameter_name(user_input, ""logical"", parameter_name), + ""computation_ready_delay_object"" = .is_valid_computation_ready_delay_object(user_input, parameter_name, additional_function_parameter), + ""delay_single_or_list"" = .is_valid_delay_single_or_list(user_input, parameter_name, additional_function_parameter), + ""delay_object"" = .is_valid_delay_object(user_input, parameter_name, additional_function_parameter), + ""number"" = .check_if_number(user_input, parameter_name), + ""non_negative_number"" = .check_if_non_negative_number(user_input, parameter_name), + ""null_or_date"" = .check_if_null_or_belongs_to_class(user_input, ""Date"", parameter_name), + ""null_or_int"" = .check_if_null_or_integer(user_input, parameter_name), + ""positive_integer"" = .check_if_positive_integer(user_input, parameter_name), + ""positive_number"" = .check_if_positive_number(user_input, parameter_name), + ""string"" = .check_if_null_or_belongs_to_class(user_input, ""character"", parameter_name), + ""date"" = .check_class_parameter_name(user_input, ""Date"", parameter_name), + ""integer"" = .check_if_integer_value(user_input, parameter_name), + ""integer_vector"" = .check_if_integer_vector(user_input, parameter_name), + ""non_negative_integer_vector"" = .check_if_non_neg_integer_vector(user_input, parameter_name), + ""distribution"" = .is_valid_distribution(user_input, parameter_name), + ""numeric_between_zero_one"" = .check_is_numeric_in_interval(user_input, parameter_name, 0, 1), + ""function_prefix"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""numeric_vector"" = .check_class_parameter_name(user_input, ""vector"", parameter_name, ""numeric""), + ""probability_distr_vector"" = .check_is_probability_distr_vector(user_input, parameter_name = parameter_name), + ""probability_distr_vector_high_tolerance"" = .check_is_probability_distr_vector(user_input, parameter_name = parameter_name, tolerance_on_sum = 1E-2), + ""probability_distr_matrix"" = .check_is_delay_distribution_matrix(user_input, additional_function_parameter, parameter_name), + ""empirical_delay_data"" = .check_is_empirical_delay_data(user_input, parameter_name), + ""estimates"" = .check_is_estimate(user_input, parameter_name, additional_function_parameter), + ""bootstrap_estimates"" = .check_is_bootstrap_estimate(user_input, parameter_name, additional_function_parameter), + ""delay_matrix_column_fit"" = .is_value_in_accepted_values_vector(user_input, parameter_name), + ""noise"" = 
.check_if_noise(user_input, parameter_name), + stop(paste(""Checking function for type"", input_type, ""not found."")) + ) + } + return(TRUE) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/smooth.R",".R","3965","108","#' Smooth noisy incidence data +#' +#' Smooth a time series of noisy observations. +#' Currently only LOESS smoothing (\code{smoothing_method = ""LOESS""}) is implemented. +#' +#' @example man/examples/smooth_incidence.R +#' +#' @inheritParams module_methods +#' @inherit module_structure +#' @inheritDotParams .smooth_LOESS -incidence_input +#' +#' @export +smooth_incidence <- function(incidence_data, + smoothing_method = ""LOESS"", + simplify_output = TRUE, + ...) { + .are_valid_argument_values(list( + list(incidence_data, ""module_input""), + list(smoothing_method, ""smoothing_method""), + list(simplify_output, ""boolean"") + )) + + + dots_args <- .get_dots_as_list(...) + input <- .get_module_input(incidence_data) + + if (smoothing_method == ""LOESS"") { + smoothed_incidence <- do.call( + "".smooth_LOESS"", + c( + list(incidence_input = input), + .get_shared_args(.smooth_LOESS, dots_args) + ) + ) + } else if (smoothing_method == ""none"") { + smoothed_incidence <- input + } else { + smoothed_incidence <- .make_empty_module_output() + } + + if (simplify_output) { + smoothed_incidence <- .simplify_output(smoothed_incidence) + } + + return(smoothed_incidence) +} + +#' LOESS smoothing function +#' +#' Prefer the use of the wrapper function \code{smooth_incidence(..., smoothing_method = ""LOESS"")} +#' instead of \code{.smooth_LOESS}. +#' +#' This function implements the LOESS method for smoothing noisy data. +#' It relies on \code{\link[stats]{loess}}. +#' See the help section for \code{\link[stats]{loess}} for details on LOESS. +#' +#' +#' @inherit module_structure +#' @inheritParams inner_module +#' @param data_points_incl integer. Size of the window used in the LOESS algorithm. +#' The \code{span} parameter passed to \code{\link[stats]{loess}} is computed as +#' the ratio of \code{data_points_incl} and the number of time steps in the input data. +#' @param degree integer. LOESS degree. Must be 0, 1 or 2. +#' @param initial_Re_estimate_window integer. In order to help with the smoothing, the function extends +#' the data back in time, padding with values obtained by assuming a constant Re. This parameter represents +#' the number of timesteps in the beginning of \code{incidence_input} to take into account when computing +#' the average initial Re. 
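#'
#' A minimal sketch (hypothetical input):
#' \code{.smooth_LOESS(c(0, 3, 5, 7, 10, 14, 12, 11, 9, 8))} smooths a short
#' series using the default window of 21 data points.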
#'
.smooth_LOESS <- function(incidence_input, data_points_incl = 21, degree = 1, initial_Re_estimate_window = 5) {
  .are_valid_argument_values(list(
    list(incidence_input, ""module_input""),
    list(data_points_incl, ""non_negative_number""),
    list(degree, ""non_negative_number""), # minimal test; needs to be one of {0,1,2}, but stats::loess already throws if it isn't
    list(initial_Re_estimate_window, ""positive_integer"")
  ))

  incidence_vector <- .get_values(incidence_input)

  n_points <- length(incidence_vector)
  sel_span <- data_points_incl / n_points

  n_pad <- round(length(incidence_vector) * sel_span * 0.5)

  avg_change_rate <- incidence_vector[2:(initial_Re_estimate_window + 1)] / incidence_vector[1:initial_Re_estimate_window]
  avg_change_rate[!is.finite(avg_change_rate)] <- 1
  avg_change_rate <- mean(avg_change_rate)

  values_to_pad_with <- incidence_vector[1] * (avg_change_rate^(-n_pad:-1))

  c_data <- data.frame(
    value = c(values_to_pad_with, incidence_vector),
    date_num = 1:(n_pad + n_points)
  )

  c_data.lo <- stats::loess(value ~ date_num, data = c_data, span = sel_span, degree = degree)
  smoothed <- stats::predict(c_data.lo)
  smoothed[smoothed < 0] <- 0
  raw_smoothed_counts <- smoothed[(n_pad + 1):length(smoothed)]
  if (sum(raw_smoothed_counts, na.rm = TRUE) > 0) {
    normalized_smoothed_counts <-
      raw_smoothed_counts * sum(incidence_vector, na.rm = TRUE) / sum(raw_smoothed_counts, na.rm = TRUE)
  } else {
    normalized_smoothed_counts <- raw_smoothed_counts
  }

  return(.get_module_output(normalized_smoothed_counts, .get_offset(incidence_input)))
}
","R"
"Nowcasting","covid-19-Re/estimateR","R/utils-uncertainty.R",".R","12434","334","#' Summarise the uncertainty obtained from bootstrapping
#'
#' This function takes bootstrapped values as input (Re estimates or other
#' quantities) and builds uncertainty intervals from these bootstrapped values.
#'
#' This function is typically not needed by regular users.
#' It is exported as it can be useful when building 'pipes':
#' sequences of computations that weave together the different
#' modules provided by \code{estimateR}.
#'
#' @inherit uncertainty
#' @inheritDotParams .summarise_CI_bootstrap
#'
#' @return A dataframe containing Re estimates (column 'Re_estimate')
#' and confidence interval boundaries, with 4 columns like so:
#' \itemize{
#' \item{\code{index_col}, the timestep index column}
#' \item{A column named \code{output_value_col},
#' containing the central values (typically these are Re estimates)}
#' \item{\code{CI_up}, the upper limit of the confidence interval}
#' \item{\code{CI_down}, the lower limit of the confidence interval}
#' }
#'
#'
#' @export
summarise_uncertainty <- function(bootstrapped_values,
                                  original_values = NULL,
                                  uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"",
                                  value_col = ""Re_estimate"",
                                  output_value_col = ""Re_estimate"",
                                  bootstrap_id_col = ""bootstrap_id"",
                                  index_col = ""idx"",
                                  ...) {
  .are_valid_argument_values(list(
    list(bootstrapped_values, ""bootstrap_estimates"", c(value_col, bootstrap_id_col, index_col)),
    list(uncertainty_summary_method, ""uncertainty_summary_method""),
    list(value_col, ""string""),
    list(output_value_col, ""string""),
    list(bootstrap_id_col, ""string""),
    list(index_col, ""string"")
  ))


  dots_args <- .get_dots_as_list(...)
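  # (Descriptive note) The renaming below maps the user-specified value column
  # onto output_value_col so that downstream helpers can refer to it uniformly.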
+ + bootstrapped_values <- bootstrapped_values %>% + dplyr::rename(!!output_value_col := .data[[value_col]]) + + if (!is.null(original_values)) { + .are_valid_argument_values(list(list(original_values, ""estimates"", index_col))) + original_values <- original_values %>% + dplyr::rename(!!output_value_col := .data[[value_col]]) + } + + if (uncertainty_summary_method == ""original estimate - CI from bootstrap estimates"") { + if (is.null(original_values)) { + stop(""'original_values' must be provided when using uncertainty method + 'original estimate - CI from bootstrap estimates'"") + } + + bootstrap_summary <- do.call( + "".summarise_CI_bootstrap"", + c( + list( + central_values = original_values, + bootstrapped_values = bootstrapped_values, + value_col = output_value_col, + bootstrap_id_col = bootstrap_id_col, + index_col = index_col + ), + .get_shared_args(.summarise_CI_bootstrap, dots_args) + ) + ) + } else if (uncertainty_summary_method == ""bagged mean - CI from bootstrap estimates"") { + central_values <- .summarise_bagged_mean( + original_values = original_values, + bootstrapped_values = bootstrapped_values, + value_col = output_value_col, + bootstrap_id_col = bootstrap_id_col, + index_col = index_col + ) + + bootstrap_summary <- do.call( + "".summarise_CI_bootstrap"", + c( + list( + central_values = central_values, + bootstrapped_values = bootstrapped_values, + value_col = output_value_col, + bootstrap_id_col = bootstrap_id_col, + index_col = index_col + ), + .get_shared_args(.summarise_CI_bootstrap, dots_args) + ) + ) + } else { + stop(""Uncertainty summary method is unknown."") + } + + return(bootstrap_summary) +} + +#' Build a confidence interval from bootstrapped values +#' +#' @inherit uncertainty +#' +#' @return dataframe with 4 columns: +#' \itemize{ +#' \item{\code{index_col}, the timestep index column} +#' \item{\code{value_col}, the value input in \code{central_values}} +#' \item{CI_up, the upper limit of the confidence interval} +#' \item{CI_down, the lower limit of the confidence interval} +#' } +.summarise_CI_bootstrap <- function(central_values, + bootstrapped_values, + value_col, + bootstrap_id_col, + index_col, + alpha = 0.95, + prefix_up = ""CI_up"", + prefix_down = ""CI_down"") { + .are_valid_argument_values(list( + list(central_values, ""estimates"", index_col), + list(bootstrapped_values, ""bootstrap_estimates"", c(value_col, bootstrap_id_col, index_col)), + list(value_col, ""string""), + list(bootstrap_id_col, ""string""), + list(index_col, ""string""), + list(alpha, ""numeric_between_zero_one""), + list(prefix_up, ""string""), + list(prefix_down, ""string"") + )) + + CI_down <- paste(prefix_down, value_col, sep = ""_"") + CI_up <- paste(prefix_up, value_col, sep = ""_"") + + high_quantile <- 1 - (1 - alpha) / 2 + + central_values <- central_values %>% + dplyr::select(.data[[index_col]], .data[[value_col]]) %>% + dplyr::filter(!is.na(.data[[value_col]])) + + value_with_uncertainty <- bootstrapped_values %>% + dplyr::select(.data[[index_col]], .data[[value_col]]) %>% + dplyr::filter(!is.na(.data[[value_col]])) %>% + dplyr::group_by(.data[[index_col]]) %>% + dplyr::summarize( + sd_mean = stats::sd(.data[[value_col]]), + .groups = ""drop"" + ) %>% + dplyr::right_join(central_values, by = index_col) %>% + dplyr::mutate( + !!CI_down := .data[[value_col]] - stats::qnorm(high_quantile) * .data$sd_mean, + !!CI_up := .data[[value_col]] + stats::qnorm(high_quantile) * .data$sd_mean + ) %>% + dplyr::mutate(!!CI_down := dplyr::if_else(.data[[CI_down]] < 0, 0, 
    .data[[CI_down]])) %>%
    dplyr::select(-.data$sd_mean) %>%
    tidyr::complete(!!index_col := seq(min(.data[[index_col]]), max(.data[[index_col]])))

  return(value_with_uncertainty)
}

#' Compute bagged mean from bootstrapped replicates
#'
#' If \code{original_values} are provided, they are included
#' in the mean computation along with the \code{bootstrapped_values}.
#'
#' @inherit uncertainty
#'
#' @return a dataframe containing a time step index column named \code{index_col}
#' and a column containing bagged mean values called \code{value_col}
.summarise_bagged_mean <- function(bootstrapped_values,
                                   original_values = NULL,
                                   value_col,
                                   bootstrap_id_col,
                                   index_col) {
  .are_valid_argument_values(list(
    list(bootstrapped_values, ""bootstrap_estimates"", c(value_col, bootstrap_id_col, index_col)),
    list(value_col, ""string""),
    list(bootstrap_id_col, ""string""),
    list(index_col, ""string"")
  ))


  bootstrapped_values <- bootstrapped_values %>%
    dplyr::select(.data[[index_col]], .data[[value_col]])

  if (!is.null(original_values)) {
    .are_valid_argument_values(list(list(original_values, ""estimates"", index_col)))

    original_values <- original_values %>%
      dplyr::select(.data[[index_col]], .data[[value_col]])

    bootstrapped_values <- bootstrapped_values %>%
      dplyr::bind_rows(original_values)
  }

  bagged_mean_value <- bootstrapped_values %>%
    dplyr::filter(!is.na(.data[[value_col]])) %>%
    dplyr::group_by(.data[[index_col]]) %>%
    dplyr::summarize(!!value_col := mean(.data[[value_col]]),
      .groups = ""drop""
    ) %>%
    tidyr::complete(!!index_col := seq(min(.data[[index_col]]), max(.data[[index_col]])))

  return(bagged_mean_value)
}


#' Utility for summarising uncertainty
#'
#' This function is not meant to be used by typical users.
#' It can be used to build custom pipes with \code{estimateR}.
#'
#' @inherit uncertainty
#' @inherit pipe_params
#' @inheritDotParams .summarise_CI_bootstrap
#'
#' @return A dataframe containing Re estimates (column 'Re_estimate')
#' and confidence interval boundaries, with 4 columns like so:
#' \itemize{
#' \item{\code{index_col}, the timestep index column}
#' \item{A column named \code{output_value_col},
#' containing the central values (typically these are Re estimates)}
#' \item{\code{CI_up}, the upper limit of the confidence interval}
#' \item{\code{CI_down}, the lower limit of the confidence interval}
#' }
do_uncertainty_summary <- function(original_values,
                                   bootstrapped_values,
                                   uncertainty_summary_method,
                                   value_col,
                                   bootstrap_id_col,
                                   index_col,
                                   output_Re_only,
                                   combine_bootstrap_and_estimation_uncertainties = FALSE,
                                   Re_HPDs = NULL,
                                   ...) {
  dots_args <- .get_dots_as_list(...)
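  # (Descriptive note) CI_down_col_name/CI_up_col_name hold the names of the
  # confidence-bound columns derived from value_col, e.g. ""CI_down_Re_estimate"".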
+ + CI_down_col_name <- paste0(""CI_down_"", value_col) + CI_up_col_name <- paste0(""CI_up_"", value_col) + + if (output_Re_only) { + estimates_with_uncertainty <- do.call( + ""summarise_uncertainty"", + c( + list( + original_values = original_values, + bootstrapped_values = bootstrapped_values, + uncertainty_summary_method = uncertainty_summary_method, + value_col = value_col, + output_value_col = value_col, + bootstrap_id_col = bootstrap_id_col, + index_col = index_col + ), + .get_shared_args(.summarise_CI_bootstrap, dots_args) + ) + ) + + CI_down_col_name <- paste0(""CI_down_"", value_col) + CI_up_col_name <- paste0(""CI_up_"", value_col) + + if (combine_bootstrap_and_estimation_uncertainties) { + estimates_with_uncertainty <- dplyr::full_join(estimates_with_uncertainty, + Re_HPDs, + by = index_col + ) %>% + dplyr::mutate( + !!CI_down_col_name := dplyr::if_else(.data[[CI_down_col_name]] > .data$Re_lowHPD, + .data$Re_lowHPD, .data[[CI_down_col_name]] + ), + !!CI_up_col_name := dplyr::if_else(.data[[CI_up_col_name]] < .data$Re_highHPD, + .data$Re_highHPD, .data[[CI_up_col_name]] + ) + ) %>% + dplyr::select(!c(.data$Re_lowHPD, .data$Re_highHPD)) + } + } else { + cols_to_summarise <- names(bootstrapped_values) + cols_to_summarise <- cols_to_summarise[!cols_to_summarise %in% c(index_col, bootstrap_id_col)] + + summaries <- lapply(cols_to_summarise, function(col_x) { + bootstrapped_estimates_of_interest <- bootstrapped_values %>% + dplyr::select(.data[[col_x]], .data[[index_col]], .data[[bootstrap_id_col]]) + + original_estimates_of_interest <- original_values %>% + dplyr::select(.data[[col_x]], .data[[index_col]], .data[[bootstrap_id_col]]) + + do.call( + ""summarise_uncertainty"", + c( + list( + original_values = original_estimates_of_interest, + bootstrapped_values = bootstrapped_estimates_of_interest, + uncertainty_summary_method = uncertainty_summary_method, + value_col = col_x, + output_value_col = col_x, + bootstrap_id_col = bootstrap_id_col, + index_col = index_col + ), + .get_shared_args(.summarise_CI_bootstrap, dots_args) + ) + ) + }) + + estimates_with_uncertainty <- summaries %>% + purrr::reduce(dplyr::full_join, by = index_col) + + if (combine_bootstrap_and_estimation_uncertainties) { + bootstrapped_CI_down_col_name <- paste0(""bootstrapped_CI_down_"", value_col) + bootstrapped_CI_up_col_name <- paste0(""bootstrapped_CI_up_"", value_col) + + + estimates_with_uncertainty <- dplyr::full_join(estimates_with_uncertainty, Re_HPDs, + by = index_col + ) %>% + dplyr::mutate( + !!bootstrapped_CI_down_col_name := .data[[CI_down_col_name]], + !!bootstrapped_CI_up_col_name := .data[[CI_up_col_name]] + ) %>% + dplyr::mutate( + !!CI_down_col_name := dplyr::if_else(.data[[CI_down_col_name]] > .data$Re_lowHPD, + .data$Re_lowHPD, .data[[CI_down_col_name]] + ), + !!CI_up_col_name := dplyr::if_else(.data[[CI_up_col_name]] < .data$Re_highHPD, + .data$Re_highHPD, .data[[CI_up_col_name]] + ) + ) + } + } + + return(estimates_with_uncertainty) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/utils-distribution.R",".R","15729","444","#' Get relevant parameters from distribution +#' +#' @param f distribution function from stats package. 
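#' @details For instance (illustrative), with \code{distribution = list(name = ""gamma"", shape = 2, scale = 1)}
#'   and \code{f = stats::dgamma}, only the \code{shape} and \code{scale} elements are retained.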
+#' @inheritParams distribution +#' +#' @return list containing elements of \code{distribution} that overlap with arguments of \code{f} +.get_distribution_parms <- function(distribution, f) { + # Remove the name element from the distribution list + distribution <- within(distribution, rm(""name"")) + + # Only keep elements of 'distribution' that are arguments of function f + distribution_parms <- distribution[names(distribution) %in% methods::formalArgs(f)] + + return(distribution_parms) +} + +#' Get distribution function +#' +#' @param function_prefix character. 'd', 'q', 'p' or 'r'. see \code{\link[stats:Distributions]{stats::Distributions}} +#' @inheritParams distribution +#' +#' @return Density, distribution function, quantile function or random generation function for \code{distribution} +.get_distribution_function <- function(distribution, function_prefix) { + .are_valid_argument_values(list( + list(distribution, ""distribution""), + list(function_prefix, ""function_prefix"") + )) + f <- get(paste0(function_prefix, distribution[[""name""]]), envir = loadNamespace(""stats"")) + return(f) +} + +#' Obtain quantile values for a distribution +#' +#' @inheritParams distribution +#' @param p vector of probabilities +#' +#' @return vector of quantiles +.get_quantiles <- function(distribution, p) { + .are_valid_argument_values(list( + list(distribution, ""distribution""), + list(p, ""probability_distr_vector_high_tolerance"") + )) + q_distribution_function <- .get_distribution_function( + distribution = distribution, + function_prefix = ""q"" + ) + + distribution_parms <- .get_distribution_parms( + distribution = distribution, + f = q_distribution_function + ) + + return(do.call(q_distribution_function, c(list(p = p), distribution_parms))) +} + +#' Draw samples from a probability distribution. +#' +#' @inheritParams distribution +#' @param n integer. Number of samples to draw. +#' +#' @return vector containing \code{n} samples of \code{distribution} +.sample_from_distribution <- function(distribution, n) { + .are_valid_argument_values(list( + list(distribution, ""distribution""), + list(n, ""integer"") + )) + + r_distribution_function <- .get_distribution_function( + distribution = distribution, + function_prefix = ""r"" + ) + + distribution_parms <- .get_distribution_parms( + distribution = distribution, + f = r_distribution_function + ) + + return(do.call(r_distribution_function, c(list(n = n), distribution_parms))) +} + +#' Discretize a probability distribution. +#' +#' @param right_boundary positive numeric value. +#' Maximum number of time steps to discretize the \code{distribution} over. +#' @param offset_by_one boolean. +#' Set to TRUE if \code{distribution} represents the fit of data that was offset by one +#' (\code{fitted_data = original_data + 1}) to accommodate zeroes in \code{original_data}. +#' @inheritParams distribution +#' +#' @return vector containing weights of the discretized probability distribution. 
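#' @details As a sketch (hypothetical parameters): with \code{offset_by_one = FALSE},
#'   the CDF is evaluated at 0, 0.5, 1.5, 2.5, ... and successive differences give
#'   the per-time-step weights.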
.get_discretized_distribution <- function(distribution, right_boundary, offset_by_one) {
  .are_valid_argument_values(list(
    list(distribution, ""distribution""),
    list(right_boundary, ""positive_number""),
    list(offset_by_one, ""boolean"")
  ))

  p_distribution_function <- .get_distribution_function(
    distribution = distribution,
    function_prefix = ""p""
  )

  distribution_parms <- .get_distribution_parms(
    f = p_distribution_function,
    distribution = distribution
  )

  if (offset_by_one) {
    x_values <- c(0, seq(from = 1.5, to = right_boundary, by = 1))
  } else {
    x_values <- c(0, seq(from = 0.5, to = right_boundary, by = 1))
  }

  cdf_values <- do.call(p_distribution_function, c(list(q = x_values), distribution_parms))

  if (length(cdf_values) == 1) {
    return(0)
  } else {
    return(diff(cdf_values))
  }
}

#' Get the number of steps before a quantile is reached.
#'
#' @inheritParams distribution
#' @param max_quantile numeric value. Upper quantile that needs to be reached.
#'
#' @return number of time steps required for the probability distribution to reach \code{max_quantile}
.get_right_boundary_for_distribution_vector <- function(distribution, max_quantile) {
  .are_valid_argument_values(list(
    list(distribution, ""distribution""),
    list(max_quantile, ""numeric_between_zero_one"")
  ))

  right_boundary <- ceiling(.get_quantiles(distribution, p = max_quantile)) + 1

  # Set the right boundary to at least two
  right_boundary <- max(right_boundary, 2)

  return(right_boundary)
}

#' Build a discretized probability distribution vector
#'
#' The discretization is done by integrating the probability density
#' on (0, 0.5), (0.5, 1.5), (1.5, 2.5)...
#'
#' @example man/examples/build_delay_distribution.R
#'
#' @inheritParams distribution
#' @inheritParams .get_discretized_distribution
#' @param max_quantile numeric value between 0 and 1.
#' Upper quantile reached by the last element in the discretized distribution vector.
#'
#' @return vector containing the discretized probability distribution of \code{distribution}.
#'
#' @export
build_delay_distribution <- function(distribution,
                                     max_quantile = 0.999,
                                     offset_by_one = FALSE) {
  .are_valid_argument_values(list(
    list(distribution, ""distribution""),
    list(max_quantile, ""numeric_between_zero_one""),
    list(offset_by_one, ""boolean"")
  ))

  right_boundary <- .get_right_boundary_for_distribution_vector(
    distribution = distribution,
    max_quantile = max_quantile
  )

  distribution_vector <- .get_discretized_distribution(
    distribution = distribution,
    right_boundary = right_boundary,
    offset_by_one = offset_by_one
  )

  return(distribution_vector)
}

#' Return probability distribution vector or matrix
#'
#' Can take a \code{distribution} list, a probability distribution vector,
#' a probability distribution matrix or empirical delay data as input.
#' If \code{delay} is already a delay distribution vector or matrix, it is returned as is.
#'
#' If \code{delay} is a single \code{distribution} list,
#' this function builds and returns the vector of the discretized probability distribution.
#' If \code{delay} is a list of \code{distribution} lists,
#' this function builds and returns the matrix of discretized probability distributions,
#' with dimensions corresponding to the number of delays in the list.
#' If \code{delay} is a vector, the function checks that \code{delay}
#' is a valid discretized probability distribution and returns it.
+#' Similarly, if \code{delay} is a matrix, +#' the function checks that it is in the correct format and returns it. +#' In a matrix, each column index corresponds to a time step associated with a date of event, +#' each row index corresponds to a time step associated with a date of event observation. +#' Each entry m_ij corresponds to the probability that an event occurring at time step j +#' is observed at time step i. +#' Matrices must be lower-triangular. Sums over columns must not exceed 1. +#' +#' See \code{\link{build_delay_distribution}} for details on the \code{distribution} list format; +#' see \code{\link{get_matrix_from_empirical_delay_distr}} for details on the empirical delay data format. +#' +#' @param delay list, vector, matrix or dataframe. +#' Delay distribution to transform or validate +#' into a vector of discretized probability distribution. +#' @inheritDotParams build_delay_distribution -distribution +#' @inherit dating +#' @inherit delay_empirical +#' +#' @return vector or matrix of discretized probability distribution. +.get_delay_distribution <- function(delay, + n_report_time_steps = NULL, + ref_date = NULL, + time_step = ""day"", + ...) { + .are_valid_argument_values(list( + list(delay, ""delay_single_or_list"", 1), # We put '1' here, because we do not care here about checking the dimension of the matrix. + list(n_report_time_steps, ""null_or_int""), + list(ref_date, ""null_or_date""), + list(time_step, ""time_step"") + )) + + dots_args <- .get_dots_as_list(...) + + if (is.data.frame(delay)) { + if (is.null(n_report_time_steps) || n_report_time_steps == 0) { + stop(""Empirical delay data input but 'n_report_time_steps' parameter was not set or set to zero."") + } + + delay_distribution <- do.call( + ""get_matrix_from_empirical_delay_distr"", + c( + list( + empirical_delays = delay, + n_report_time_steps = n_report_time_steps, + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(list(get_matrix_from_empirical_delay_distr), dots_args) + ) + ) + } else if (is.list(delay)) { + if(.is_single_delay(delay)) { + delay_distribution <- do.call( + ""build_delay_distribution"", + c( + list(distribution = delay), + .get_shared_args(list(build_delay_distribution), dots_args) + ) + ) + } else { + delay_distribution <- do.call( + "".get_delay_matrix_from_delay_distributions"", + c( + list(distributions = delay), + .get_shared_args(list(build_delay_distribution), dots_args) + ) + ) + } + + } else if (is.matrix(delay) || .is_numeric_vector(delay)) { + delay_distribution <- delay + } else { + stop(""Unknown delay type."") + } + + return(delay_distribution) +} + +#' Build delay distribution matrix from a single delay or a list of delay distributions +#' +#' @param distributions single distribution or list of distributions, +#' each element is either a distribution list or discretized probability distribution vector. +#' @param N integer. Dimension of output matrix. +#' Ignored if a list of distributions is provided. +#' @inheritDotParams build_delay_distribution -distribution +#' +#' @return delay distribution matrix +.get_delay_matrix_from_delay_distributions <- function(distributions, N = 1, ...) { + + .are_valid_argument_values(list( + # We put '1' here, because we do not care here about checking the dimension of the matrix. + list(distributions, ""delay_single_or_list"", 1), + list(N, ""positive_integer"") + )) + + is_single_distribution <- .is_single_delay(distributions) + dots_args <- .get_dots_as_list(...) 
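  # (Descriptive note) In the single-delay branch below, one discretized vector
  # is shifted down by one step per column, producing a lower-triangular matrix.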
+
+  if (is_single_distribution) {
+    # Generate delay distribution vector
+    delay_distribution_vector <- do.call(
+      "".get_delay_distribution"",
+      c(
+        list(delay = distributions),
+        .get_shared_args(.get_delay_distribution, dots_args)
+      )
+    )
+    if (N >= length(delay_distribution_vector)) {
+      delay_distribution_vector <- c(delay_distribution_vector, rep(0, times = N - length(delay_distribution_vector)))
+    }
+
+    delay_distribution_matrix <- matrix(0, nrow = N, ncol = N)
+    for (i in 1:N) {
+      delay_distribution_matrix[, i] <- c(rep(0, times = i - 1), delay_distribution_vector[1:(N - i + 1)])
+    }
+
+    return(delay_distribution_matrix)
+
+  } else {
+    # List of distributions: each distribution corresponds to the delay distribution of a particular time step.
+    # These distributions fill the delay matrix by column (below the diagonal).
+
+    for (i in 1:length(distributions)) {
+      .are_valid_argument_values(list(list(distributions[[i]], ""distribution"")))
+    }
+    dots_args <- .get_dots_as_list(...)
+
+    # Generate list of delay distribution vectors
+    delay_distribution_list <- lapply(distributions, function(distr) {
+      do.call(
+        ""build_delay_distribution"",
+        c(
+          list(distribution = distr),
+          .get_shared_args(build_delay_distribution, dots_args)
+        )
+      )
+    })
+
+    N <- length(distributions)
+
+    # Initialize empty matrix
+    delay_distribution_matrix <- matrix(0, nrow = N, ncol = N)
+
+    # Fill matrix by column
+    for (i in 1:N) {
+      delay_distr <- delay_distribution_list[[i]]
+
+      # Right-pad delay_distr vector with zeroes if needed
+      if (length(delay_distr) < N - i + 1) {
+        delay_distr <- c(delay_distr, rep(0, times = N - i + 1 - length(delay_distr)))
+      }
+      delay_distribution_matrix[, i] <- c(rep(0, times = i - 1), delay_distr[1:(N - i + 1)])
+    }
+
+    return(delay_distribution_matrix)
+  }
+}
+
+#' Augment a delay distribution by left padding with new columns.
+#'
+#' This function reshapes a discretized delay distribution matrix
+#' by left-padding it with \code{n_col_augment} columns.
+#' Because the output matrix must also be lower-triangular,
+#' additional zero rows are added at the top.
+#' This function allows one to extend further in the past
+#' the range of the initial delay distribution matrix.
+#' This is useful when convolving that delay distribution matrix
+#' with another delay distribution.
+#'
+#' The columns that are added replicate the left-most column of
+#' \code{delay_distribution_matrix}.
+#'
+#' @inheritParams distribution
+#' @param n_col_augment an integer. Number of columns to left-pad
+#' \code{delay_distribution_matrix} with.
+#'
+#' @return If \code{delay_distribution_matrix} is of dimension N,
+#' then the result is of dimension N + \code{n_col_augment}.
+.left_augment_delay_distribution <- function(delay_distribution_matrix,
+                                             n_col_augment) {
+  .are_valid_argument_values(list(
+    list(delay_distribution_matrix, ""probability_distr_matrix"", 0),
+    list(n_col_augment, ""positive_integer"")
+  ))
+
+  n_col_original <- ncol(delay_distribution_matrix)
+  n_col_augmented <- n_col_original + n_col_augment
+
+  # Initialize empty matrix
+  augmented_matrix <- matrix(0, nrow = n_col_augmented, ncol = n_col_augmented)
+
+  # Fill matrix by column
+
+  # Start by duplicating first column in original matrix into 'n_col_augment' first columns of augmented_matrix
+  for (i in 1:n_col_augment) {
+    augmented_matrix[, i] <- c(rep(0, times = i - 1), delay_distribution_matrix[, 1], rep(0, times = n_col_augment - i + 1))
+  }
+
+  # Then fill with original matrix, adding the required zero on the top rows
+  for (i in (n_col_augment + 1):n_col_augmented) {
+    augmented_matrix[, i] <- c(rep(0, times = n_col_augment), delay_distribution_matrix[, i - n_col_augment])
+  }
+
+  return(augmented_matrix)
+}
+
+#' Get initial shift for deconvolution step
+#'
+#' Return the time step corresponding to a specified quantile
+#' of a particular delay distribution (specified as a vector).
+#'
+#' @inheritParams distribution
+#'
+#' @return an integer value: the number of time steps needed for the
+#' cumulative probability of the input delay distribution to exceed the specified quantile.
+#' The returned value is always non-negative.
+.get_time_steps_quantile <- function(delay_distribution_vector, quantile = 0.5) {
+  .are_valid_argument_values(
+    list(
+      list(delay_distribution_vector, ""probability_distr_vector""),
+      list(quantile, ""numeric_between_zero_one"")
+    )
+  )
+
+  initial_shift <- ceiling(min(which(cumsum(delay_distribution_vector) > quantile))) - 1
+  initial_shift <- max(initial_shift, 0, na.rm = TRUE)
+  return(initial_shift)
+}
+
+#' Is input a single delay object?
+#'
+#' @param delay_list list or single delay object
+#'
+#' @return TRUE if single delay, FALSE otherwise
+.is_single_delay <- function(delay_list) {
+  .are_valid_argument_values(list(
+    # We put '1' here, because we do not care here about checking the dimension of the matrix.
+    list(delay_list, ""delay_single_or_list"", 1)
+  ))
+
+  if (is.list(delay_list) && !is.data.frame(delay_list)) {
+    is_distribution <- try(.is_valid_distribution(delay_list, ""dummy_name""), silent = TRUE)
+    if (""try-error"" %!in% class(is_distribution)) {
+      return(TRUE)
+    }
+    return(FALSE)
+  } else {
+    return(TRUE)
+  }
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/utils-testing.R",".R","7126","142","#' Utility function to print vectors in a copy-pastable format
+#'
+#' @param a numeric vector
+#' @param digits integer. Number of digits to round.
+#'
+#' @return Print vector as string
+.print_vector <- function(a, digits = 2) {
+  cat(paste0(""c("", paste(round(a, digits = digits), collapse = "",""), "")""))
+}
+
+#' Generate artificial delay data.
+#'
+#' This utility can be used to build toy examples to test functions dealing with empirical delay data.
+#' It is very basic in what it simulates.
+#' A random walk is simulated over \code{n_time_steps}, representing the incidence through time.
+#' The result of this simulation is offset so that all values are positive.
+#' Then, for each time step, \code{n} samples from a delay distribution are taken,
+#' with \code{n} being the incidence value at this time step.
+#' The random draws are then multiplied by a factor (>1 or <1) to simulate
+#' a gradual shift in the delay distribution through time.
+#' This multiplication factor is calculated
+#' by linear interpolation,
+#' from 1 at the first time step to \code{ratio_delay_end_to_start}
+#' at the last time step.
+#'
+#' @param origin_date Date of first infection.
+#' @param n_time_steps integer. Number of time steps to generate delays over.
+#' @param ratio_delay_end_to_start numeric value.
+#' Shift in delay distribution from start to end.
+#' @param distribution_initial_delay Distribution in list format.
+#' @param seed integer. Optional RNG seed.
+#' @inherit dating
+#'
+#' @return dataframe. Simulated delay data.
+.generate_delay_data <- function(origin_date = as.Date(""2020-02-01""),
+                                 n_time_steps = 100,
+                                 time_step = ""day"",
+                                 ratio_delay_end_to_start = 2,
+                                 distribution_initial_delay = list(name = ""gamma"", shape = 6, scale = 5),
+                                 seed = NULL) {
+  .are_valid_argument_values(list(
+    list(origin_date, ""date""),
+    list(n_time_steps, ""positive_integer""),
+    list(time_step, ""time_step""),
+    list(ratio_delay_end_to_start, ""number""),
+    list(distribution_initial_delay, ""distribution""),
+    list(seed, ""null_or_int"")
+  ))
+
+  if (!is.null(seed)) {
+    set.seed(seed)
+  }
+
+  random_pop_size_walk <- cumsum(sample(c(-1, 0, 1), n_time_steps, TRUE))
+  pop_size <- random_pop_size_walk - min(0, min(random_pop_size_walk)) + 1
+
+  event_dates <- seq.Date(from = origin_date, length.out = n_time_steps, by = time_step)
+
+  delays <- lapply(1:n_time_steps, function(i) {
+    # Sample a number of draws from the gamma delay distribution, based on the pop size at time step i
+    raw_sampled_delays <- .sample_from_distribution(
+      distribution = distribution_initial_delay,
+      n = pop_size[i]
+    )
+    # Multiply these samples by a factor that accounts for the linear inflation or deflation of delays.
+    sampled_delays <- round(raw_sampled_delays * (1 + i / n_time_steps * (ratio_delay_end_to_start - 1)))
+
+    return(tibble::tibble(event_date = event_dates[i], report_delay = sampled_delays))
+  })
+
+  return(dplyr::bind_rows(delays))
+}
+
+#' Utility function that generates delay data, assuming a different delay between event and observation for each individual day.
+#' It then generates a delay matrix and computes the RMSE between the parameters of the gamma distributions passed as arguments and the ones recovered from the delay matrix.
+#' The shapes and scales of the gamma distributions are specified as parameters, and the number of time steps is assumed to be equal to the length of these vectors.
+#'
+#' This function is useful for testing purposes.
+#' @param original_distribution_shapes vector. Specifies the shapes for the gamma distributions.
+#' @param original_distribution_scales vector. Specifies the scales for the gamma distributions.
+#' @param nr_distribution_samples integer. Number of cases to sample for each time step.
+#'
+#' @return A list with the computed RMSE.
It has two elements: $shape_rmse and $scale_rmse
+.delay_distribution_matrix_rmse_compute <- function(original_distribution_shapes, original_distribution_scales, nr_distribution_samples = 500) {
+
+  # Create a vector with all dates in the observation interval
+  start_date <- as.Date(""2021/04/01"")
+  time_steps <- length(original_distribution_shapes)
+  end_date <- start_date + time_steps
+  available_dates <- seq(start_date, end_date, by = ""day"")
+
+  # Build the delay data. Events on each individual day are assumed to be observed according to a different gamma distribution, as specified by original_distribution_shapes and original_distribution_scales.
+  sampled_report_delays <- c()
+  report_dates <- as.Date(c())
+  for (i in 1:time_steps) {
+    new_sampled_report_delays <- .sample_from_distribution(list(name = ""gamma"", shape = original_distribution_shapes[i], scale = original_distribution_scales[i]), nr_distribution_samples)
+    sampled_report_delays <- c(sampled_report_delays, new_sampled_report_delays)
+    new_report_dates <- rep(available_dates[i], nr_distribution_samples)
+    report_dates <- c(report_dates, new_report_dates)
+  }
+  delay_data <- dplyr::tibble(event_date = report_dates, report_delay = sampled_report_delays)
+  result <- get_matrix_from_empirical_delay_distr(delay_data, time_steps, fit = ""gamma"", return_fitted_distribution = TRUE)
+
+  delay_matrix <- result$matrix
+  distrib_list <- result$distributions
+
+  # Get the shapes and scales of the gamma distributions fitted by the get_matrix_from_empirical_delay_distr function
+  distribution_shapes <- c()
+  distribution_scales <- c()
+
+  for (distribution in distrib_list) {
+    distribution_shapes <- c(distribution_shapes, distribution$shape)
+    distribution_scales <- c(distribution_scales, distribution$scale)
+  }
+
+  # Compute the RMSE between the desired gamma distribution shapes and scales, and the ones obtained by the get_matrix_from_empirical_delay_distr function
+  start_index <- length(distribution_shapes) - length(original_distribution_shapes) + 1
+  shape_rmse <- Metrics::rmse(distribution_shapes[start_index:length(distribution_shapes)], original_distribution_shapes) / mean(original_distribution_shapes)
+  scale_rmse <- Metrics::rmse(distribution_scales[start_index:length(distribution_scales)], original_distribution_scales) / mean(original_distribution_scales)
+
+  return(list(shape_rmse = shape_rmse, scale_rmse = scale_rmse))
+}
+
+#' Test whether a delay matrix's columns sum to at most one.
+#'
+#' @param matrix input matrix
+#' @param full_cols number of full columns.
+#' Full columns must sum to 1.
+#' @param tolerance tolerance on the result
+#'
+#' @return TRUE if test passed
+expect_delay_matrix_sums_lte_1 <- function(matrix, full_cols = 0, tolerance = 1E-3) {
+  if (full_cols > 0 && full_cols < ncol(matrix)) {
+    sums_full_cols <- apply(matrix[, 1:full_cols], MARGIN = 2, FUN = sum)
+    testthat::expect_equal(sums_full_cols, rep(1, times = length(sums_full_cols)), tolerance = tolerance)
+  }
+
+  sum_all_cols <- apply(matrix, MARGIN = 2, FUN = sum)
+  testthat::expect_lte(max(abs(sum_all_cols)), 1)
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/pipe.R",".R","36501","1027","# TODO later. reduce duplication between bootstrapping pipe functions
+
+#' Estimate Re from incidence and estimate uncertainty with block-bootstrapping
+#'
+#' An estimation of the effective reproductive number through time is made
+#' on the original incidence data.
+#' Then, the same estimation is performed on a number of bootstrap samples built from the original incidence data. +#' The estimate on the original data is output along with confidence interval boundaries +#' built from the distribution of bootstrapped estimates. +#' +#' @example man/examples/get_block_bootstrapped_estimate.R +#' +#' @inheritParams pipe_params +#' @inheritParams bootstrap_params +#' @inheritParams module_methods +#' @inheritParams module_structure +#' @inheritParams universal_params +#' @inheritParams delay_high +#' @inheritParams dating +#' @inheritDotParams .smooth_LOESS -incidence_input +#' @inheritDotParams .deconvolve_incidence_Richardson_Lucy -incidence_input +#' @inheritDotParams .estimate_Re_EpiEstim_sliding_window -incidence_input +#' @inheritDotParams .estimate_Re_EpiEstim_piecewise_constant -incidence_input -output_HPD +#' +#' @inherit bootstrap_return return +#' @export +get_block_bootstrapped_estimate <- function(incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + combine_bootstrap_and_estimation_uncertainties = FALSE, + delay, + import_incidence_data = NULL, + ref_date = NULL, + time_step = ""day"", + output_Re_only = TRUE, + ...) { + .are_valid_argument_values(list( + list(incidence_data, ""module_input""), + list(N_bootstrap_replicates, ""non_negative_number""), + list(smoothing_method, ""smoothing_method""), + list(deconvolution_method, ""deconvolution_method""), + list(estimation_method, ""estimation_method""), + list(uncertainty_summary_method, ""uncertainty_summary_method""), + list(combine_bootstrap_and_estimation_uncertainties, ""boolean""), + list(delay, ""delay_single_or_list"", .get_input_length(incidence_data)), + list(ref_date, ""null_or_date""), + list(time_step, ""time_step""), + list(output_Re_only, ""boolean"") + )) + + dots_args <- .get_dots_as_list(...) 
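+
+  # Outline of the block-bootstrap scheme implemented below (a sketch of the
+  # data flow, not additional computation):
+  #   1. run smoothing -> deconvolution -> Re estimation once on the raw data;
+  #   2. repeat the same pipeline on N_bootstrap_replicates resampled series;
+  #   3. summarise the spread of the replicate estimates into confidence
+  #      intervals around the original estimate.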
+ + index_col <- ""idx"" + bootstrap_id_col <- ""bootstrap_id"" + + # Display progress bar + # progress_bar <- utils::txtProgressBar(min = 0, max = N_bootstrap_replicates + 1, style = 3) + # utils::setTxtProgressBar(progress_bar, 0) + + # Prepare delay distribution vector or matrix early on as it spares the need to redo the same operation for each bootstrap replicate + total_delay_distribution <- do.call( + ""convolve_delays"", + c( + list( + delays = delay, + n_report_time_steps = length(incidence_data), + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(convolve_delays, dots_args) + ) + ) + + smooth_deconvolve_estimate_dots_args <- .get_shared_args( + list( + .smooth_LOESS, + .deconvolve_incidence_Richardson_Lucy, + .estimate_Re_EpiEstim_sliding_window, + .estimate_Re_EpiEstim_piecewise_constant, + get_matrix_from_empirical_delay_distr, + build_delay_distribution + ), + dots_args + ) + + original_result <- do.call( + ""estimate_Re_from_noisy_delayed_incidence"", + c( + list( + incidence_data = incidence_data, + smoothing_method = smoothing_method, + deconvolution_method = deconvolution_method, + estimation_method = estimation_method, + delay = total_delay_distribution, + import_incidence_data = import_incidence_data, + ref_date = NULL, + output_Re_only = FALSE, + include_index = TRUE, + index_col = index_col, + output_HPD = combine_bootstrap_and_estimation_uncertainties + ), + smooth_deconvolve_estimate_dots_args + ) + ) + + if (combine_bootstrap_and_estimation_uncertainties) { + # Keep the Re estimation HPDs for later + Re_HPDs <- original_result %>% + dplyr::select(.data[[index_col]], .data$Re_highHPD, .data$Re_lowHPD) + + # Remove them from the original_result variable + original_result <- original_result %>% + dplyr::select(!c(.data$Re_highHPD, .data$Re_lowHPD)) + } else { + Re_HPDs <- NULL + } + + original_result[[bootstrap_id_col]] <- 0 + + bootstrapping_results <- list(original_result) + + for (i in 1:N_bootstrap_replicates) { + + # utils::setTxtProgressBar(progress_bar, i) + + bootstrapped_incidence <- do.call( + ""get_bootstrap_replicate"", + c( + list( + incidence_data = incidence_data, + bootstrapping_method = ""non-parametric block boostrap"" + ), + .get_shared_args( + list( + .block_bootstrap, + .block_bootstrap_overlap_func, + .smooth_LOESS + ), + dots_args + ) + ) + ) + + if (!is.null(import_incidence_data)) { + .are_valid_argument_values(list( + list(import_incidence_data, ""module_input"") + )) + + bootstrapped_import_incidence <- do.call( + ""get_bootstrap_replicate"", + c( + list( + incidence_data = import_incidence_data, + bootstrapping_method = ""non-parametric block boostrap"" + ), + .get_shared_args( + list( + .block_bootstrap, + .block_bootstrap_overlap_func, + .smooth_LOESS + ), + dots_args + ) + ) + ) + } else { + bootstrapped_import_incidence <- NULL + } + + bootstrapping_result <- do.call( + ""estimate_Re_from_noisy_delayed_incidence"", + c( + list( + incidence_data = bootstrapped_incidence, + smoothing_method = smoothing_method, + deconvolution_method = deconvolution_method, + estimation_method = estimation_method, + delay = total_delay_distribution, + import_incidence_data = bootstrapped_import_incidence, + ref_date = NULL, + output_Re_only = FALSE, + include_index = TRUE, + index_col = index_col, + output_HPD = FALSE + ), + smooth_deconvolve_estimate_dots_args + ) + ) + + bootstrapping_result[[bootstrap_id_col]] <- i + + bootstrapping_results <- c(bootstrapping_results, list(bootstrapping_result)) + } + + bootstrapped_estimates <- 
dplyr::bind_rows(bootstrapping_results)
+
+  original_estimates <- bootstrapped_estimates %>%
+    dplyr::filter(.data[[bootstrap_id_col]] == 0)
+
+  bootstrapped_estimates <- bootstrapped_estimates %>%
+    dplyr::filter(.data[[bootstrap_id_col]] > 0)
+
+  estimates_with_uncertainty <- do.call(
+    ""do_uncertainty_summary"",
+    c(
+      list(
+        original_values = original_estimates,
+        bootstrapped_values = bootstrapped_estimates,
+        uncertainty_summary_method = uncertainty_summary_method,
+        value_col = ""Re_estimate"",
+        bootstrap_id_col = bootstrap_id_col,
+        index_col = index_col,
+        output_Re_only = output_Re_only,
+        combine_bootstrap_and_estimation_uncertainties = combine_bootstrap_and_estimation_uncertainties,
+        Re_HPDs = Re_HPDs
+      ),
+      .get_shared_args(.summarise_CI_bootstrap, dots_args)
+    )
+  )
+
+  if (!is.null(ref_date)) {
+    estimates_with_uncertainty <- .add_date_column(estimates_with_uncertainty,
+      ref_date = ref_date,
+      time_step = time_step,
+      index_col = index_col,
+      keep_index_col = FALSE
+    )
+  }
+
+  # Close progress bar
+  # utils::setTxtProgressBar(progress_bar, N_bootstrap_replicates + 1)
+  # close(progress_bar)
+
+  pretty_results <- do.call(
+    "".prettify_result"",
+    c(
+      list(data = estimates_with_uncertainty),
+      .get_shared_args(.prettify_result, dots_args)
+    )
+  )
+
+  return(pretty_results)
+}
+
+#' Estimate Re from incidence data
+#'
+#' This pipe function combines a smoothing step (to remove noise from the original observations),
+#' a deconvolution step (to retrieve infection events from the observed delays),
+#' and an Re estimation step wrapping around \code{\link[EpiEstim]{estimate_R}}.
+#'
+#' The \code{\link[=smooth_incidence]{smoothing step}} uses the LOESS method by default.
+#' The \code{\link[=deconvolve_incidence]{deconvolution step}} uses the Richardson-Lucy algorithm by default.
+#' The \code{\link[=estimate_Re]{Re estimation}} uses the Cori method with a sliding window by default.
+#'
+#' @example man/examples/estimate_Re_from_noisy_delayed_incidence.R
+#'
+#' @inheritParams module_structure
+#' @inheritParams module_methods
+#' @inheritParams universal_params
+#' @inheritParams pipe_params
+#' @inheritParams delay_high
+#' @inheritParams dating
+#' @inheritDotParams .smooth_LOESS -incidence_input
+#' @inheritDotParams .deconvolve_incidence_Richardson_Lucy -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_sliding_window -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_piecewise_constant -incidence_input -output_HPD
+#'
+#' @return Time series of effective reproductive number estimates through time.
+#' If \code{ref_date} is provided then a date column is included with the output.
+#' @export
+estimate_Re_from_noisy_delayed_incidence <- function(incidence_data,
+                                                     smoothing_method = ""LOESS"",
+                                                     deconvolution_method = ""Richardson-Lucy delay distribution"",
+                                                     estimation_method = ""EpiEstim sliding window"",
+                                                     delay,
+                                                     import_incidence_data = NULL,
+                                                     ref_date = NULL,
+                                                     time_step = ""day"",
+                                                     output_Re_only = TRUE,
+                                                     ...) {
+  .are_valid_argument_values(list(
+    list(incidence_data, ""module_input""),
+    list(smoothing_method, ""smoothing_method""),
+    list(deconvolution_method, ""deconvolution_method""),
+    list(estimation_method, ""estimation_method""),
+    list(delay, ""delay_single_or_list"", .get_input_length(incidence_data)),
+    list(ref_date, ""null_or_date""),
+    list(time_step, ""time_step""),
+    list(output_Re_only, ""boolean"")
+  ))
+
+  dots_args <- .get_dots_as_list(...)
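+
+  # Minimal usage sketch (comment only; hypothetical toy input, assuming a
+  # gamma delay in the list format accepted by build_delay_distribution):
+  #   estimate_Re_from_noisy_delayed_incidence(
+  #     incidence_data = c(2, 5, 8, 10, 14, 19, 22, 30, 38, 44, 51, 55),
+  #     delay = list(name = ""gamma"", shape = 6, scale = 1)
+  #   )
+  # The three module calls below implement the smoothing, deconvolution and
+  # estimation steps, in that order.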
+ + smoothed_incidence <- do.call( + ""smooth_incidence"", + c( + list( + incidence_data = incidence_data, + smoothing_method = smoothing_method + ), + .get_shared_args(.smooth_LOESS, dots_args) + ) + ) + + deconvolved_incidence <- do.call( + ""deconvolve_incidence"", + c( + list( + incidence_data = smoothed_incidence, + deconvolution_method = deconvolution_method, + delay = delay + ), + .get_shared_args( + list( + .deconvolve_incidence_Richardson_Lucy, + convolve_delays + ), + dots_args + ) + ) + ) + + if (!is.null(import_incidence_data)) { + .are_valid_argument_values(list( + list(import_incidence_data, ""module_input"") + )) + + smoothed_import_incidence <- do.call( + ""smooth_incidence"", + c( + list( + incidence_data = import_incidence_data, + smoothing_method = smoothing_method + ), + .get_shared_args(.smooth_LOESS, dots_args) + ) + ) + + deconvolved_import_incidence <- do.call( + ""deconvolve_incidence"", + c( + list( + incidence_data = smoothed_import_incidence, + deconvolution_method = deconvolution_method, + delay = delay + ), + .get_shared_args( + list( + .deconvolve_incidence_Richardson_Lucy, + convolve_delays + ), + dots_args + ) + ) + ) + } else { + deconvolved_import_incidence <- NULL + } + + estimated_Re <- do.call( + ""estimate_Re"", + c( + list( + incidence_data = deconvolved_incidence, + estimation_method = estimation_method, + simplify_output = FALSE, + import_incidence_input = deconvolved_import_incidence + ), + .get_shared_args( + list( + .estimate_Re_EpiEstim_sliding_window, + .estimate_Re_EpiEstim_piecewise_constant + ), dots_args) + ) + ) + + if (is.null(estimated_Re)) { + stop(""Failed to produce Re estimates."") + } + + if (output_Re_only) { + merged_results <- estimated_Re + } else { + # TODO later. simplify this call (create util function) + test_if_single_output <- try(.is_valid_module_input(estimated_Re, ""estimated_Re""), silent = TRUE) + if (!(""try-error"" %in% class(test_if_single_output))) { + estimated_Re <- list(""Re_estimate"" = estimated_Re) + } + + merged_results <- do.call( + ""merge_outputs"", + c( + list( + output_list = c( + list( + ""observed_incidence"" = incidence_data, + ""smoothed_incidence"" = smoothed_incidence, + ""deconvolved_incidence"" = deconvolved_incidence + ), + estimated_Re + ), + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(merge_outputs, dots_args) + ) + ) + } + + pretty_results <- do.call( + "".prettify_result"", + c( + list(data = merged_results), + .get_shared_args(.prettify_result, dots_args) + ) + ) + + return(pretty_results) +} + +#' Infer timeseries of infection events from incidence data of delayed observations +#' +#' This function takes as input incidence data of delayed observations of infections events, +#' as well as the probability distribution(s) of the delay(s). +#' It returns an inferred incidence of infection events. +#' +#' This function can account for the observations being dependent on future delayed observations, +#' with the \code{is_partially_reported_data} flag. +#' For instance, if the incidence data represents symptom onset events, usually these events +#' are dependent on a secondary delayed observation: a case confirmation typically, or +#' a hospital admission or any other type of event. +#' When setting \code{is_partially_reported_data} to \code{TRUE}, +#' use the \code{delay_until_final_report} argument to specify the delay +#' from infection until this secondary delayed observation. 
+#' +#' @example man/examples/get_infections_from_incidence.R +#' +#' @inheritParams module_structure +#' @inheritParams module_methods +#' @inheritParams pipe_params +#' @inheritParams delay_high +#' @inheritParams dating +#' @inheritDotParams .smooth_LOESS -incidence_input +#' @inheritDotParams .deconvolve_incidence_Richardson_Lucy -incidence_input +#' @inheritDotParams merge_outputs -output_list -include_index -index_col +#' @inheritDotParams nowcast -incidence_data -delay_until_final_report +#' +#' @return Time series of infections through time. +#' If \code{ref_date} is provided then a date column is included with the output. +#' @export +get_infections_from_incidence <- function(incidence_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay, + is_partially_reported_data = FALSE, + delay_until_final_report = NULL, + output_infection_incidence_only = TRUE, + ref_date = NULL, + time_step = ""day"", + ...) { + .are_valid_argument_values(list( + list(incidence_data, ""module_input""), + list(smoothing_method, ""smoothing_method""), + list(deconvolution_method, ""deconvolution_method""), + list(delay, ""delay_single_or_list"", .get_input_length(incidence_data)), + list(is_partially_reported_data, ""boolean""), + list(output_infection_incidence_only, ""boolean""), + list(ref_date, ""null_or_date""), + list(time_step, ""time_step"") + )) + + dots_args <- .get_dots_as_list(...) + + original_incidence_data <- incidence_data + + if (is_partially_reported_data) { + .are_valid_argument_values(list(list(delay_until_final_report, ""delay_single_or_list"", .get_input_length(incidence_data)))) + + incidence_data <- do.call( + ""nowcast"", + c( + list( + incidence_data = incidence_data, + delay_until_final_report = delay_until_final_report, + ref_date = NULL, + time_step = ""day"" + ), + .get_shared_args(nowcast, dots_args) + ) + ) + } + + smoothed_incidence <- do.call( + ""smooth_incidence"", + c( + list( + incidence_data = incidence_data, + smoothing_method = smoothing_method + ), + .get_shared_args(.smooth_LOESS, dots_args) + ) + ) + + deconvolved_incidence <- do.call( + ""deconvolve_incidence"", + c( + list( + incidence_data = smoothed_incidence, + deconvolution_method = deconvolution_method, + delay = delay + ), + .get_shared_args( + list( + .deconvolve_incidence_Richardson_Lucy, + convolve_delays + ), + dots_args + ) + ) + ) + + if (output_infection_incidence_only) { + pretty_results <- deconvolved_incidence + } else { + if (is_partially_reported_data) { + output_list <- list( + ""observed_incidence"" = original_incidence_data, + ""corrected_incidence"" = incidence_data, + ""smoothed_incidence"" = smoothed_incidence, + ""deconvolved_incidence"" = deconvolved_incidence + ) + } else { + output_list <- list( + ""observed_incidence"" = incidence_data, + ""smoothed_incidence"" = smoothed_incidence, + ""deconvolved_incidence"" = deconvolved_incidence + ) + } + + merged_results <- do.call( + ""merge_outputs"", + c( + list( + output_list = output_list, + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(merge_outputs, dots_args) + ) + ) + pretty_results <- do.call( + "".prettify_result"", + c( + list(data = merged_results), + .get_shared_args(.prettify_result, dots_args) + ) + ) + } + + + return(pretty_results) +} + + +# TODO later. allow to pass a third argument for delays: the convolution of all delays (to speed up bootstrapping) +#' Estimate Re from delayed observations of infection events. 
+#'
+#' This function allows for combining two different incidence time series,
+#' see Details.
+#' The two time series can represent events that are differently delayed from the original infection events.
+#' The two data sources must not have any overlap in the events recorded.
+#' The function can account for one of the two types of events requiring
+#' the future observation of the other type of event.
+#' For instance, one type can be symptom onset events, and the other case confirmation events.
+#' Typically, the recording of a symptom onset event will require a future case confirmation.
+#' If so, the \code{partial_observation_requires_full_observation} flag should be set to \code{TRUE}.
+#'
+#' @example man/examples/estimate_from_combined_observations.R
+#'
+#' @inheritParams module_structure
+#' @inheritParams module_methods
+#' @inheritParams pipe_params
+#' @inheritParams delay_high
+#' @inheritParams dating
+#' @inheritDotParams .smooth_LOESS -incidence_input
+#' @inheritDotParams .deconvolve_incidence_Richardson_Lucy -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_sliding_window -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_piecewise_constant -incidence_input -output_HPD
+#' @inheritDotParams merge_outputs -output_list -include_index -index_col
+#' @inheritDotParams nowcast -incidence_data -delay_until_final_report
+#'
+#' @inherit combining_observations
+#'
+#' @return Effective reproductive number estimates through time.
+#' If \code{output_Re_only} is \code{FALSE}, then transformations made
+#' on the input observations during calculations are output as well.
+#' @export
+estimate_from_combined_observations <- function(partially_delayed_incidence,
+                                                fully_delayed_incidence,
+                                                smoothing_method = ""LOESS"",
+                                                deconvolution_method = ""Richardson-Lucy delay distribution"",
+                                                estimation_method = ""EpiEstim sliding window"",
+                                                delay_until_partial,
+                                                delay_until_final_report,
+                                                partial_observation_requires_full_observation = TRUE,
+                                                ref_date = NULL,
+                                                time_step = ""day"",
+                                                output_Re_only = TRUE,
+                                                ...) {
+  .are_valid_argument_values(list(
+    list(partially_delayed_incidence, ""module_input""),
+    list(fully_delayed_incidence, ""module_input""),
+    list(smoothing_method, ""smoothing_method""),
+    list(deconvolution_method, ""deconvolution_method""),
+    list(estimation_method, ""estimation_method""),
+    list(delay_until_partial, ""delay_single_or_list"", .get_input_length(partially_delayed_incidence)), # need to pass length of incidence data as well in order
+    list(delay_until_final_report, ""delay_single_or_list"", .get_input_length(fully_delayed_incidence)), # to validate when the delay is passed as a matrix
+    list(partial_observation_requires_full_observation, ""boolean""),
+    list(ref_date, ""null_or_date""),
+    list(time_step, ""time_step""),
+    list(output_Re_only, ""boolean"")
+  ))
+
+  dots_args <- .get_dots_as_list(...)
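+
+  # Sketch of the combination logic implemented below: infection events are
+  # reconstructed separately from each time series (using delay_until_partial
+  # for the partially delayed one, and the convolution of delay_until_partial
+  # with delay_until_final_report for the fully delayed one), the two inferred
+  # infection incidences are summed, and Re is estimated on the sum.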
+
+  infections_from_partially_delayed_observations <- do.call(
+    ""get_infections_from_incidence"",
+    c(
+      list(partially_delayed_incidence,
+        smoothing_method = smoothing_method,
+        deconvolution_method = deconvolution_method,
+        delay = delay_until_partial,
+        is_partially_reported_data = partial_observation_requires_full_observation,
+        delay_until_final_report = delay_until_final_report,
+        output_infection_incidence_only = TRUE
+      ),
+      .get_shared_args(
+        list(
+          .smooth_LOESS,
+          nowcast,
+          .deconvolve_incidence_Richardson_Lucy,
+          convolve_delays
+        ),
+        dots_args
+      )
+    )
+  )
+
+  # Use if/else rather than ifelse() here: ifelse() is vectorized and would
+  # silently keep only the first element when the delay is a list of distributions.
+  if (.is_single_delay(delay_until_partial)) {
+    delay_until_partial_as_list <- list(delay_until_partial)
+  } else {
+    delay_until_partial_as_list <- delay_until_partial
+  }
+
+  if (.is_single_delay(delay_until_final_report)) {
+    delay_until_final_report_as_list <- list(delay_until_final_report)
+  } else {
+    delay_until_final_report_as_list <- delay_until_final_report
+  }
+  combined_delay_list <- append(delay_until_partial_as_list, delay_until_final_report_as_list)
+
+  infections_from_fully_delayed_observations <- do.call(
+    ""get_infections_from_incidence"",
+    c(
+      list(fully_delayed_incidence,
+        smoothing_method = smoothing_method,
+        deconvolution_method = deconvolution_method,
+        delay = combined_delay_list,
+        is_partially_reported_data = FALSE,
+        output_infection_incidence_only = TRUE
+      ),
+      .get_shared_args(
+        list(
+          .smooth_LOESS,
+          .deconvolve_incidence_Richardson_Lucy,
+          convolve_delays
+        ),
+        dots_args
+      )
+    )
+  )
+
+  all_infection_events <- inner_addition(infections_from_partially_delayed_observations, infections_from_fully_delayed_observations)
+
+  estimated_Re <- do.call(
+    ""estimate_Re"",
+    c(
+      list(
+        incidence_data = all_infection_events,
+        estimation_method = estimation_method
+      ),
+      .get_shared_args(
+        list(
+          .estimate_Re_EpiEstim_sliding_window,
+          .estimate_Re_EpiEstim_piecewise_constant
+        ), dots_args)
+    )
+  )
+
+  if (output_Re_only) {
+    merged_results <- estimated_Re
+  } else {
+    test_if_single_output <- try(.is_valid_module_input(estimated_Re, ""estimated_Re""), silent = TRUE)
+    if (!(""try-error"" %in% class(test_if_single_output))) {
+      estimated_Re <- list(""Re_estimate"" = estimated_Re)
+    }
+
+    merged_results <- do.call(
+      ""merge_outputs"",
+      c(
+        list(
+          output_list = c(
+            list(
+              ""partially_delayed_observations"" = partially_delayed_incidence,
+              ""fully_delayed_observations"" = fully_delayed_incidence,
+              ""combined_deconvolved_incidence"" = all_infection_events
+            ),
+            estimated_Re
+          ),
+          ref_date = ref_date,
+          time_step = time_step
+        ),
+        .get_shared_args(merge_outputs, dots_args)
+      )
+    )
+  }
+  pretty_results <- do.call(
+    "".prettify_result"",
+    c(
+      list(data = merged_results),
+      .get_shared_args(.prettify_result, dots_args)
+    )
+  )
+
+  return(pretty_results)
+}
+
+#' Estimate Re from incidence and estimate uncertainty by bootstrapping
+#'
+#'
+#' An estimation of the effective reproductive number through time is made
+#' on the original incidence data.
+#' Then, the same estimation is performed on a number of bootstrap samples built from the original incidence data.
+#' The estimate on the original data is output along with confidence interval boundaries
+#' built from the distribution of bootstrapped estimates.
+#'
+#' This function allows for combining two different incidence time series.
+#' The two time series can represent events that are differently delayed from the original infection events.
+#' The two data sources must not have any overlap in the events recorded.
+#' The function can account for one of the two types of events requiring
+#' the future observation of the other type of event.
+#' For instance, one type can be symptom onset events, and the other case confirmation events.
+#' Typically, the recording of a symptom onset event will require a future case confirmation.
+#' If so, the \code{partial_observation_requires_full_observation} flag should be set to \code{TRUE}.
+#'
+#' @example man/examples/get_bootstrapped_estimates_from_combined_observations.R
+#'
+#' @inheritParams module_structure
+#' @inheritParams module_methods
+#' @inheritParams pipe_params
+#' @inheritParams bootstrap_params
+#' @inheritParams delay_high
+#' @inheritParams dating
+#' @inheritDotParams .smooth_LOESS -incidence_input
+#' @inheritDotParams .deconvolve_incidence_Richardson_Lucy -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_sliding_window -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_piecewise_constant -incidence_input -output_HPD
+#' @inheritDotParams merge_outputs -output_list -include_index -index_col
+#' @inheritDotParams nowcast -incidence_data -delay_until_final_report
+#'
+#' @inherit combining_observations
+#' @inherit bootstrap_return return
+#'
+#' @export
+get_bootstrapped_estimates_from_combined_observations <- function(partially_delayed_incidence,
+                                                                  fully_delayed_incidence,
+                                                                  smoothing_method = ""LOESS"",
+                                                                  deconvolution_method = ""Richardson-Lucy delay distribution"",
+                                                                  estimation_method = ""EpiEstim sliding window"",
+                                                                  bootstrapping_method = ""non-parametric block boostrap"",
+                                                                  uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"",
+                                                                  combine_bootstrap_and_estimation_uncertainties = FALSE,
+                                                                  N_bootstrap_replicates = 100,
+                                                                  delay_until_partial,
+                                                                  delay_until_final_report,
+                                                                  partial_observation_requires_full_observation = TRUE,
+                                                                  ref_date = NULL,
+                                                                  time_step = ""day"",
+                                                                  output_Re_only = TRUE,
+                                                                  ...) {
+  .are_valid_argument_values(list(
+    list(partially_delayed_incidence, ""module_input""),
+    list(fully_delayed_incidence, ""module_input""),
+    list(smoothing_method, ""smoothing_method""),
+    list(deconvolution_method, ""deconvolution_method""),
+    list(estimation_method, ""estimation_method""),
+    list(uncertainty_summary_method, ""uncertainty_summary_method""),
+    list(combine_bootstrap_and_estimation_uncertainties, ""boolean""),
+    list(N_bootstrap_replicates, ""non_negative_number""),
+    list(delay_until_partial, ""delay_single_or_list"", .get_input_length(partially_delayed_incidence)),
+    list(delay_until_final_report, ""delay_single_or_list"", .get_input_length(fully_delayed_incidence)),
+    list(partial_observation_requires_full_observation, ""boolean""),
+    list(ref_date, ""null_or_date""),
+    list(time_step, ""time_step""),
+    list(output_Re_only, ""boolean"")
+  ))
+
+  # TODO later. allow for 'partially_delayed_incidence' or 'fully_delayed_incidence' to be NULL,
+  # (need to ensure all subsequent functions allow NULL or make if-else)
+  # TODO later. turn get_block_bootstrapped_estimate into a wrapper around this function with partially_delayed_incidence=NULL
+
+  dots_args <- .get_dots_as_list(...)
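+
+  # As in get_block_bootstrapped_estimate, the delay distributions are
+  # precomputed once below (via convolve_delays) and reused for every
+  # bootstrap replicate, so that only the resampling and the
+  # smooth/deconvolve/estimate pipeline are repeated inside the loop.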
+ + index_col <- ""idx"" + bootstrap_id_col <- ""bootstrap_id"" + + # Precompute delay distribution vector or matrix to avoid repeating costly computations needlessly for each bootstrap sample + delay_distribution_until_partial <- do.call( + ""convolve_delays"", + c( + list( + delays = delay_until_partial, + n_report_time_steps = .get_input_length(partially_delayed_incidence), + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(list( + convolve_delays, + build_delay_distribution, + get_matrix_from_empirical_delay_distr + ), dots_args) + ) + ) + + delay_distribution_partial_to_full <- do.call( + ""convolve_delays"", + c( + list( + delays = delay_until_final_report, + n_report_time_steps = .get_input_length(partially_delayed_incidence), + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(list( + convolve_delays, + build_delay_distribution, + get_matrix_from_empirical_delay_distr + ), dots_args) + ) + ) + + if (partial_observation_requires_full_observation) { + partially_delayed_incidence <- do.call( + ""nowcast"", + c( + list( + incidence_data = partially_delayed_incidence, + delay_until_final_report = delay_distribution_partial_to_full + ), + .get_shared_args(nowcast, dots_args) + ) + ) + } + + estimate_from_combined_observations_dots_args <- .get_shared_args( + list( + .smooth_LOESS, + .deconvolve_incidence_Richardson_Lucy, + .estimate_Re_EpiEstim_sliding_window, + .estimate_Re_EpiEstim_piecewise_constant, + get_matrix_from_empirical_delay_distr, + build_delay_distribution + ), + dots_args + ) + + original_result <- do.call( + ""estimate_from_combined_observations"", + c( + list( + partially_delayed_incidence = partially_delayed_incidence, + fully_delayed_incidence = fully_delayed_incidence, + smoothing_method = smoothing_method, + deconvolution_method = deconvolution_method, + estimation_method = estimation_method, + delay_until_partial = delay_distribution_until_partial, + delay_until_final_report = delay_distribution_partial_to_full, + partial_observation_requires_full_observation = FALSE, + ref_date = NULL, + output_Re_only = FALSE, + include_index = TRUE, + index_col = index_col, + output_HPD = combine_bootstrap_and_estimation_uncertainties + ), + estimate_from_combined_observations_dots_args + ) + ) + + if (combine_bootstrap_and_estimation_uncertainties) { + # Keep the Re estimation HPDs for later + Re_HPDs <- original_result %>% + dplyr::select(.data[[index_col]], .data$Re_highHPD, .data$Re_lowHPD) + + # Remove them from the original_result variable + original_result <- original_result %>% + dplyr::select(!c(.data$Re_highHPD, .data$Re_lowHPD)) + } else { + Re_HPDs <- NULL + } + + original_result[[bootstrap_id_col]] <- 0 + + bootstrapping_results <- list(original_result) + + for (i in 1:N_bootstrap_replicates) { + bootstrapped_partially_delayed_incidence <- do.call( + ""get_bootstrap_replicate"", + c( + list( + incidence_data = partially_delayed_incidence, + bootstrapping_method = bootstrapping_method + ), + .get_shared_args( + list( + .block_bootstrap, + .block_bootstrap_overlap_func, + .smooth_LOESS + ), + dots_args + ) + ) + ) + + bootstrapped_fully_delayed_incidence <- do.call( + ""get_bootstrap_replicate"", + c( + list( + incidence_data = fully_delayed_incidence, + bootstrapping_method = bootstrapping_method + ), + .get_shared_args( + list( + .block_bootstrap, + .block_bootstrap_overlap_func, + .smooth_LOESS + ), + dots_args + ) + ) + ) + + bootstrapped_estimate <- do.call( + ""estimate_from_combined_observations"", + c( + list( + 
partially_delayed_incidence = bootstrapped_partially_delayed_incidence,
+          fully_delayed_incidence = bootstrapped_fully_delayed_incidence,
+          smoothing_method = smoothing_method,
+          deconvolution_method = deconvolution_method,
+          estimation_method = estimation_method,
+          delay_until_partial = delay_distribution_until_partial,
+          delay_until_final_report = delay_distribution_partial_to_full,
+          partial_observation_requires_full_observation = FALSE,
+          ref_date = NULL,
+          output_Re_only = FALSE,
+          include_index = TRUE,
+          index_col = index_col,
+          output_HPD = FALSE
+        ),
+        estimate_from_combined_observations_dots_args
+      )
+    )
+
+    bootstrapped_estimate[[bootstrap_id_col]] <- i
+
+    bootstrapping_results <- c(bootstrapping_results, list(bootstrapped_estimate))
+  }
+
+  bootstrapped_estimates <- dplyr::bind_rows(bootstrapping_results)
+
+  original_estimates <- bootstrapped_estimates %>%
+    dplyr::filter(.data[[bootstrap_id_col]] == 0)
+
+  bootstrapped_estimates <- bootstrapped_estimates %>%
+    dplyr::filter(.data[[bootstrap_id_col]] > 0)
+
+  estimates_with_uncertainty <- do.call(
+    ""do_uncertainty_summary"",
+    c(
+      list(
+        original_values = original_estimates,
+        bootstrapped_values = bootstrapped_estimates,
+        uncertainty_summary_method = uncertainty_summary_method,
+        value_col = ""Re_estimate"",
+        bootstrap_id_col = bootstrap_id_col,
+        index_col = index_col,
+        output_Re_only = output_Re_only,
+        combine_bootstrap_and_estimation_uncertainties = combine_bootstrap_and_estimation_uncertainties,
+        Re_HPDs = Re_HPDs
+      ),
+      .get_shared_args(.summarise_CI_bootstrap, dots_args)
+    )
+  )
+
+  if (!is.null(ref_date)) {
+    estimates_with_uncertainty <- .add_date_column(estimates_with_uncertainty,
+      ref_date = ref_date,
+      time_step = time_step,
+      index_col = index_col,
+      keep_index_col = FALSE
+    )
+  }
+
+  pretty_results <- do.call(
+    "".prettify_result"",
+    c(
+      list(data = estimates_with_uncertainty),
+      .get_shared_args(.prettify_result, dots_args)
+    )
+  )
+
+  return(pretty_results)
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/utils-arguments.R",".R","1230","43","#' Get the names of the arguments of a function
+#'
+#' @param func function
+#'
+#' @return names of the arguments of \code{func}
+.get_arg_names <- function(func) {
+  return(names(formals(func)))
+}
+
+#' Return dots arguments as list.
+#'
+#' If there are no dots arguments, then return an empty list.
+#'
+#' @param ... dots arguments.
+#'
+#' @return list containing dots arguments or empty list
+.get_dots_as_list <- function(...) {
+  if (...length() > 0) {
+    dots_args <- list(...)
+  } else {
+    dots_args <- list()
+  }
+  return(dots_args)
+}
+
+#' Get the arguments which apply to a function among a given list of arguments.
+#'
+#' This function is used to find which arguments should be passed
+#' to a list of functions among the dot arguments passed to a higher-level function.
+#' +#' @param func_list list of functions +#' @param dots_args list of arguments +#' +#' @return elements of \code{dots_args} which can be passed to \code{func_list} +.get_shared_args <- function(func_list, dots_args) { + if (is.function(func_list)) { + func_arg_names <- .get_arg_names(func_list) + } else if (is.list(func_list)) { + func_arg_names <- unlist(lapply(func_list, .get_arg_names)) + } + return(dots_args[names(dots_args) %in% func_arg_names]) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/utils-general.R",".R","44","3","# Useful operator +`%!in%` <- Negate(`%in%`) +","R" +"Nowcasting","covid-19-Re/estimateR","R/deconvolve.R",".R","7954","194","#' Infer infection events dates from delayed observations +#' +#' This function reconstructs an incidence of infection events +#' from incidence data representing delayed observations. +#' The assumption made is that delayed observations represent +#' the convolution of the time series of infections with a delay distribution. +#' \code{deconvolve_incidence} implements a deconvolution algorithm (Richardson-Lucy) to reconstruct +#' a vector of infection events from input data that represents delayed observations. +#' +#' @example man/examples/deconvolve_incidence.R +#' @inheritParams module_methods +#' @inherit module_structure +#' @inheritParams delay_high +#' @inheritDotParams convolve_delays +#' @inheritDotParams .deconvolve_incidence_Richardson_Lucy -incidence_input +#' +#' @export +deconvolve_incidence <- function(incidence_data, + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay, + simplify_output = TRUE, + ...) { + + # TODO be careful if we relax the constraints on incidence_data: + # .get_input_length(incidence_data) may not make sense anymore + # TODO need to make sure whether delays can be matrices with nrows > length(incidence_data) + .are_valid_argument_values(list( + list(incidence_data, ""module_input""), + list(deconvolution_method, ""deconvolution_method""), + list(simplify_output, ""boolean""), + list(delay, ""delay_single_or_list"", .get_input_length(incidence_data)) + )) + + dots_args <- .get_dots_as_list(...) + input <- .get_module_input(incidence_data) + + total_delay_distribution <- do.call( + ""convolve_delays"", + c( + list( + delays = delay, + n_report_time_steps = .get_input_length(input) + ), + .get_shared_args(list( + convolve_delays, + build_delay_distribution, + get_matrix_from_empirical_delay_distr + ), dots_args) + ) + ) + + if (deconvolution_method == ""Richardson-Lucy delay distribution"") { + deconvolved_incidence <- do.call( + "".deconvolve_incidence_Richardson_Lucy"", + c( + list( + incidence_input = input, + delay_distribution = total_delay_distribution + ), + .get_shared_args(.deconvolve_incidence_Richardson_Lucy, dots_args) + ) + ) + } else if (deconvolution_method == ""none"") { + deconvolved_incidence <- input + } else { + deconvolved_incidence <- .make_empty_module_output() + } + + if (simplify_output) { + deconvolved_incidence <- .simplify_output(deconvolved_incidence) + } + + return(deconvolved_incidence) +} + +#' Deconvolve the incidence input with the Richardson-Lucy (R-L) algorithm +#' +#' @inheritParams inner_module +#' @inheritParams universal_params +#' @param delay_distribution numeric square matrix or vector. +#' @param threshold_chi_squared numeric scalar. Threshold for chi-squared values under which the R-L algorithm stops. +#' @param max_iterations integer. Maximum threshold for the number of iterations in the R-L algorithm. 
+#' @inherit module_structure
+.deconvolve_incidence_Richardson_Lucy <- function(incidence_input,
+                                                  delay_distribution,
+                                                  threshold_chi_squared = 1,
+                                                  max_iterations = 100,
+                                                  verbose = FALSE) {
+  .are_valid_argument_values(list(
+    list(incidence_input, ""module_input""),
+    list(delay_distribution, ""computation_ready_delay_object"", .get_input_length(incidence_input)),
+    list(threshold_chi_squared, ""non_negative_number""),
+    list(max_iterations, ""non_negative_number""),
+    list(verbose, ""boolean"")
+  ))
+
+  incidence_vector <- .get_values(incidence_input)
+  length_original_vector <- length(incidence_vector)
+  first_recorded_incidence <- incidence_vector[1]
+  penultimate_recorded_incidence <- incidence_vector[length_original_vector - 1]
+  last_recorded_incidence <- incidence_vector[length_original_vector]
+
+  if (NCOL(delay_distribution) == 1) { # delay_distribution is not a matrix yet.
+    n_time_units_left_extension <- .get_time_steps_quantile(delay_distribution, quantile = 0.99)
+    initial_shift <- .get_time_steps_quantile(delay_distribution, quantile = 0.5)
+
+    delay_distribution_matrix <- .get_delay_matrix_from_delay_distributions(
+      delay_distribution,
+      N = length(incidence_vector) + n_time_units_left_extension
+    )
+  } else {
+    delay_distribution_matrix <- delay_distribution
+
+    if (NCOL(delay_distribution_matrix) < length_original_vector) {
+      stop(""The dimension of 'delay_distribution' cannot be smaller than the length of 'incidence_input'."")
+    }
+    n_time_units_left_extension <- NCOL(delay_distribution_matrix) - length_original_vector
+
+    initial_shift <- min(
+      n_time_units_left_extension,
+      .get_time_steps_quantile(delay_distribution_matrix[, 1], quantile = 0.5)
+    )
+  }
+
+  # We could either extend with zeroes here (valid when incidence before the first record is known to be zero)
+  # or with an extrapolation of the early values.
+  # Extending with zeroes arguably has the benefit that the padded values do not affect the fit to the observed time steps.
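+
+  # Iteration sketch of the Richardson-Lucy update implemented below: with D
+  # the (left-padded) observed incidence, P the delay distribution matrix and
+  # q_j = sum_i P[i, j] the probability that an event at step j is observed
+  # at all, each iteration updates the estimate lambda as
+  #   lambda_j <- (lambda_j / q_j) * sum_i P[i, j] * D_i / (P %*% lambda)_i
+  # This is the crossprod() step in the while loop, which stops once the
+  # chi-squared distance between P %*% lambda and D falls below the threshold.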
+ original_incidence <- c(rep(0, times = n_time_units_left_extension), incidence_vector) + + ### Richardson-Lucy algorithm + + ## Initial step + # Prepare vector with initial guess for first step of deconvolution + # Here we extend with extrapolation of last values + present_trend <- last_recorded_incidence/penultimate_recorded_incidence + if(is.infinite(present_trend) || is.nan(present_trend)) {present_trend <- 1 } + + if(initial_shift >= 1){ + right_padding_values <- last_recorded_incidence * present_trend^(1:initial_shift) + } else { + right_padding_values <- c() + } + current_estimate <- c(incidence_vector, right_padding_values) + + extra_left_steps <- n_time_units_left_extension - initial_shift + + if (extra_left_steps > 0) { + current_estimate <- c(rep(first_recorded_incidence, times = extra_left_steps), current_estimate) + } else if (extra_left_steps < 0) { + stop(""Initial shift in R-L algo should be less than the number of steps padded on the left side."") + } + + chi_squared <- Inf + count <- 1 + + truncated_delay_distribution_matrix <- delay_distribution_matrix[(1 + n_time_units_left_extension):NROW(delay_distribution_matrix), , drop = F] + + observation_probability <- apply(truncated_delay_distribution_matrix, MARGIN = 2, sum) + + if (verbose) { + cat(""\tStart of Richardson-Lucy algorithm\n"") + } + + ## Iterative steps + while (chi_squared > threshold_chi_squared & count <= max_iterations) { + if (verbose) { + cat(""\t\tStep: "", count, "" - Chi squared: "", chi_squared, ""\n"") + } + + convolved_estimate <- as.vector(delay_distribution_matrix %*% current_estimate) + ratio_original_to_reconstructed <- tidyr::replace_na(original_incidence / convolved_estimate, 0) + + current_estimate <- current_estimate / observation_probability * as.vector(crossprod(ratio_original_to_reconstructed, delay_distribution_matrix)) + current_estimate <- tidyr::replace_na(current_estimate, 0) + + chi_squared <- 1 / length_original_vector * + sum((convolved_estimate[(n_time_units_left_extension + 1):length(convolved_estimate)] - + original_incidence[(n_time_units_left_extension + 1):length(original_incidence)])^2 / + convolved_estimate[(n_time_units_left_extension + 1):length(convolved_estimate)], na.rm = T) + + count <- count + 1 + } + + if (verbose) { + cat(""\tEnd of Richardson-Lucy algorithm\n"") + } + + additional_offset <- -initial_shift + # Remove first and last values as they cannot be properly inferred + #TODO investigate with simulations if this can be shortened + final_estimate <- current_estimate[(1 + extra_left_steps):(length(current_estimate) - initial_shift)] + + return(.get_module_output(final_estimate, .get_offset(incidence_input), additional_offset)) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/estimateR.R",".R","24789","508","#' @importFrom magrittr %>% +#' @importFrom rlang .data +#' @importFrom rlang := +NULL + +#' Utility functions for input validity. +#' +#' @param string_user_input A string containing the value that the user passed for the tested string type parameter. +#' @param parameter_name A string containing the name the tested parameter had in the initial function in which it was passed. +#' @param incidence_data_length A number representing the length of the given incidence data. +#' @param user_inputs A list of lists with two elements: the first is the value of the parameter to be tested. The second is the expected type of that parameter. 
+#' @param number The value to be tested
+#' @param user_input The variable to be tested
+#'
+#' @return TRUE if all tests were passed. Throws an error otherwise.
+#'
+#' @name validation_utility_params
+NULL
+
+#' Module structure characteristics
+#'
+#' @param incidence_data An object containing incidence data through time.
+#' It can either be:
+#' \itemize{
+#' \item A list with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the incidence recorded on consecutive time steps.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the first value in \code{values} is shifted compared to a reference time step.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are delayed in the future compared to the reference values.
+#' A negative offset means the opposite.
+#' }
+#' \item A numeric vector. The vector corresponds to the \code{values} element
+#' described above, and \code{index_offset} is implicitly zero.
+#' This means that the first value in \code{incidence_data}
+#' is associated with the reference time step (no shift towards the future or past).
+#' }
+#' @param import_incidence_data NULL or argument with the same requirements as \code{incidence_data}.
+#' If not NULL, this argument represents records of imported cases and
+#' \code{incidence_data} represents local cases only.
+#' @param partially_delayed_incidence An object containing incidence data through time.
+#' It can be:
+#' \itemize{
+#' \item A list with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the incidence recorded on consecutive time steps.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the first value in \code{values} is shifted compared to a reference time step.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are delayed in the future compared to the reference values.
+#' A negative offset means the opposite.
+#' }
+#' \item A numeric vector. The vector corresponds to the \code{values} element
+#' described above, and \code{index_offset} is implicitly zero.
+#' This means that the first value in \code{partially_delayed_incidence}
+#' is associated with the reference time step (no shift towards the future or past).
+#' }
+#' @param fully_delayed_incidence An object containing incidence data through time.
+#' It can be:
+#' \itemize{
+#' \item A list with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the incidence recorded on consecutive time steps.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the first value in \code{values} is shifted compared to a reference time step.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are delayed in the future compared to the reference values.
+#' A negative offset means the opposite.
+#' }
+#' \item A numeric vector. The vector corresponds to the \code{values} element
+#' described above, and \code{index_offset} is implicitly zero.
+#' This means that the first value in \code{fully_delayed_incidence}
+#' is associated with the reference time step (no shift towards the future or past).
+#' }
+#' @param simplify_output boolean. Return a numeric vector instead of module
+#' output object if output offset is zero?
+#'
+#'
+#' @return A list with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the result of the computations on the input data.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the result is shifted compared to an \code{index_offset} of \code{0}.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are shifted towards the future compared to the reference values.
+#' A negative offset means the opposite.
+#' Note that the \code{index_offset} of the output of the function call
+#' accounts for the (optional) \code{index_offset} of the input.
+#' }
+#' If \code{index_offset} is \code{0} and \code{simplify_output = TRUE},
+#' the \code{index_offset} is dropped and the \code{values}
+#' element is returned as a numeric vector.
+#'
+#'
+#' @name module_structure
+NULL
+
+
+#' Details on combining observations
+#' @details
+#' With this function, one can specify two types of delayed observations of
+#' infection events (in the same epidemic). The two incidence records are
+#' passed with the \code{partially_delayed_incidence} and \code{fully_delayed_incidence} arguments.
+#' These two types of delayed observations must not overlap with one another:
+#' a particular infection event should not be recorded in both time series.
+#'
+#' If the two sets of observations are completely independent from one another,
+#' meaning that they represent two different ways infection events
+#' can be observed, each with its own delay,
+#' then set \code{partial_observation_requires_full_observation} to \code{FALSE}.
+#' Note that a particular infection event should NOT be recorded twice:
+#' it cannot be recorded both in \code{partially_delayed_incidence} and in \code{fully_delayed_incidence}.
+#'
+#' An alternative use-case is when the two sets of observations are not independent
+#' from one another: for instance, when a ""partially-delayed"" event can only be
+#' recorded once it has been recorded as a ""fully-delayed"" event.
+#' A typical example of this occurs when recording symptom onset events:
+#' in most cases, you must first wait until a case is confirmed via a positive test result
+#' to learn about the symptom onset event (assuming the case was symptomatic in the first place).
+#' But you typically do not have the date of onset of symptoms
+#' for all confirmed cases (even assuming they were all symptomatic cases).
+#' In such a case, we set the \code{partial_observation_requires_full_observation} flag
+#' to \code{TRUE} and we call the incidence constructed from events of
+#' symptom onset \code{partially_delayed_incidence} and
+#' the incidence constructed from case confirmation events
+#' \code{fully_delayed_incidence}.
+#' The delay from infection to symptom onset events is
+#' specified with the \code{delay_until_partial} argument.
+#' The delay from symptom onset to positive test in this example is
+#' specified with the \code{delay_until_final_report} argument.
+#' Note that, for a particular patient,
+#' if the date of onset of symptoms is known, the patient must not be counted again
+#' in the incidence of case confirmation.
+#' Otherwise, the infection event would have been counted twice.
+#'
+#'
+#' @name combining_observations
+NULL
+
+#' Inner module option characteristics
+#'
+#' @param incidence_input,input,output,input_a,input_b Module input object.
+#' List with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the incidence recorded on consecutive time steps.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the first value in \code{values} is shifted compared to a reference time step.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are shifted towards the future compared to the reference values.
+#' A negative offset means the opposite.
+#' }
+#'
+#' @return Module input object.
+#' List with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the incidence recorded on consecutive time steps.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the first value in \code{values} is shifted compared to a reference time step.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are shifted towards the future compared to the reference values.
+#' A negative offset means the opposite.
+#' }
+#' @name inner_module
+NULL
+
+#' EpiEstim wrappers arguments
+#'
+#' @param minimum_cumul_incidence Numeric value.
+#' Minimum number of cumulated infections before starting the Re estimation.
+#' Default is \code{12} as recommended in Cori et al., 2013.
+#' @param mean_serial_interval Numeric positive value. \code{mean_si} for \code{\link[EpiEstim]{estimate_R}}
+#' @param std_serial_interval Numeric positive value. \code{std_si} for \code{\link[EpiEstim]{estimate_R}}
+#' @param mean_Re_prior Numeric positive value. \code{mean_prior} for \code{\link[EpiEstim]{estimate_R}}
+#' @param output_HPD Boolean. If TRUE, return the highest posterior density interval with the output.
+#' @param import_incidence_input NULL or module input object.
+#' List with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the incidence recorded on consecutive time steps.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the first value in \code{values} is shifted compared to a reference time step.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} are shifted towards the future compared to the reference values.
+#' A negative offset means the opposite.
+#' }
+#' If not NULL, this data represents recorded imported cases,
+#' and \code{incidence_input} then represents local cases only.
+#'
+#' @return If \code{output_HPD = FALSE},
+#' the value is a module object (a list of the same kind as \code{incidence_input}).
+#' The \code{values} element of the list then contains the Re estimates.
+#' If \code{output_HPD = TRUE}, a list of three module objects is returned.
+#' \itemize{
+#' \item \code{Re_estimate} contains the Re estimates.
+#' \item \code{Re_highHPD} and \code{Re_lowHPD} contain
+#' the upper and lower boundaries of the HPD interval,
+#' as computed by \code{\link[EpiEstim]{estimate_R}}
+#' }
+#'
+#' @name EpiEstim_wrapper
+NULL
+
+#' Universal parameters
+#'
+#' @param verbose Boolean. Print verbose output?
+#'
+#' @name universal_params
+NULL
+
+#' Pipe parameters
+#'
+#' @param output_Re_only boolean. Should the output only contain Re estimates?
+#' (as opposed to containing results for each intermediate step)
+#' @param output_infection_incidence_only boolean.
+#' Should the output contain only the estimated infection incidence?
+#' (as opposed to containing results for intermediate steps)
+#'
+#' @name pipe_params
+NULL
+
+#' Bootstrapping parameters
+#'
+#' @param N_bootstrap_replicates integer. Number of bootstrap samples.
+#' @param combine_bootstrap_and_estimation_uncertainties boolean.
+#' If TRUE, the uncertainty interval reported is the union of
+#' the highest posterior density interval from the Re estimation
+#' with the confidence interval from the bootstrapping of time series
+#' of observations.
+#'
+#' @name bootstrap_params
+NULL
+
+#' Bootstrapping pipe
+#'
+#' @return Effective reproduction number estimates through time with confidence interval boundaries.
+#' If \code{output_Re_only} is \code{FALSE}, then transformations made
+#' on the input observations during calculations are output as well.
+#'
+#' @name bootstrap_return
+NULL
+
+#' High-level delay parameters
+#'
+#' @param delays List of delays, with flexible structure.
+#' Each delay in the \code{delays} list can be one of:
+#' \itemize{
+#' \item{a list representing a distribution object}
+#' \item{a discretized delay distribution vector}
+#' \item{a discretized delay distribution matrix}
+#' \item{a dataframe containing empirical delay data}
+#' }
+#' @param delay Single delay or list of delays.
+#' Each delay can be one of:
+#' \itemize{
+#' \item{a list representing a distribution object}
+#' \item{a discretized delay distribution vector}
+#' \item{a discretized delay distribution matrix}
+#' \item{a dataframe containing empirical delay data}
+#' }
+#' @param delay_distribution_final_report
+#' Distribution of the delay between the events collected in the incidence data
+#' and the a posteriori observations of these events.
+#' @param gap_to_present Integer. Default value: 0.
+#' Number of time steps truncated off from the right tail of the raw incidence data.
+#' See the Details section.
+#' @param cutoff_observation_probability value between 0 and 1.
+#' Only datapoints for timesteps that have a probability of observing an event
+#' higher than \code{cutoff_observation_probability} are kept.
+#' The few datapoints with a lower probability of being observed are trimmed off
+#' the tail of the time series.
+#' @param is_partially_reported_data boolean.
+#' Set to \code{TRUE} if \code{incidence_data} represents delayed observations
+#' of infection events that themselves rely on further-delayed observations.
+#' @param delay_until_partial Single delay or list of delays.
+#' Each delay can be one of:
+#' \itemize{
+#' \item{a list representing a distribution object}
+#' \item{a discretized delay distribution vector}
+#' \item{a discretized delay distribution matrix}
+#' \item{a dataframe containing empirical delay data}
+#' }
+#' @param delay_until_final_report Single delay or list of delays.
+#' Each delay can be one of:
+#' \itemize{
+#' \item{a list representing a distribution object}
+#' \item{a discretized delay distribution vector}
+#' \item{a discretized delay distribution matrix}
+#' \item{a dataframe containing empirical delay data}
+#' }
+#' @param partial_observation_requires_full_observation boolean.
+#' Set to \code{TRUE} if \code{partially_delayed_incidence} represents
+#' delayed observations of infection events that
+#' themselves rely on further-delayed observations.
+#' See the Details section.
+#'
+#' @name delay_high
+NULL
+
+#' Empirical delays parameters
+#'
+#' @param empirical_delays,delays dataframe containing the empirical data. See Details.
+#' @param n_report_time_steps integer. Length of the incidence time series in the accompanying analysis.
+#' This argument is needed to determine the dimensions of the output matrix.
+#' @param min_number_cases integer.
+#' Minimal number of cases to build the empirical distribution from.
+#' If \code{num_steps_in_a_unit} is \code{NULL}, for any time step T,
+#' the \code{min_number_cases} records prior to T are used.
+#' If fewer than \code{min_number_cases} delays were recorded before T,
+#' then T is ignored and the \code{min_number_cases} earliest-recorded delays are used.
+#' If \code{num_steps_in_a_unit} is given a value, a similar procedure is applied,
+#' except that the delays must now be taken over a whole number of
+#' time units. For example, if \code{num_steps_in_a_unit = 7}, and time steps represent consecutive days,
+#' to build the distribution for time step T,
+#' we find the smallest number of weeks starting from T and going into the past
+#' for which at least \code{min_number_cases} delays were recorded.
+#' We then use all the delays recorded during these weeks.
+#' Weeks do not necessarily run Monday to Sunday;
+#' any 7 days in a row qualify, e.g. Thursday to Wednesday.
+#' Again, if fewer than \code{min_number_cases} delays were recorded before T,
+#' then T is ignored: we instead find the minimum number of whole weeks,
+#' starting from the first recorded delay,
+#' that contains at least \code{min_number_cases} delays, and use those.
+#'
+#' @param min_number_cases_fraction numeric. Between 0 and 1.
+#' If \code{min_number_cases} is not provided (kept to \code{NULL}),
+#' the number of most-recent cases used to build
+#' the instant delay distribution is \code{min_number_cases_fraction}
+#' times the total number of reported delays.
+#' @param min_min_number_cases numeric. Lower bound
+#' for the number of cases used to build an instant delay distribution.
+#' @param upper_quantile_threshold numeric. Between 0 and 1.
+#' Argument for internal use.
+#' @param fit string. One of ""gamma"" or ""none"". Specifies the type of fit that
+#' is applied to the columns of the delay matrix.
+#' @param date_of_interest Date. Date for which the most recent recorded delays are sought.
+#' @param num_steps_in_a_unit Optional argument.
+#' Number of time steps in a full time unit (e.g. 7 if looking at weeks).
+#' If set, the delays used to build a particular
+#' delay distribution will span over a whole number of such time units.
+#' This option is included for comparison with legacy code.
+#'
+#'
+#' @name delay_empirical
+NULL
+
+
+
+#' Dating parameters
+#'
+#' @param ref_date Date. Optional. Date of the first data entry in \code{incidence_data}.
+#' @param time_step string. Time between two consecutive incidence datapoints.
+#' ""day"", ""2 days"", ""week"", ""year""... (see \code{\link[base]{seq.Date}} for details)
+#'
+#' @name dating
+NULL
+
+#' Simulation parameters
+#'
+#' @param Rt Numeric vector. Reproductive number values through time.
+#' @param mean_SI Numeric positive value. Mean of the serial interval of the simulated epidemic.
+#' @param sd_SI Numeric positive value. Standard deviation of the serial interval of the simulated epidemic.
+#' @param infections Positive integer vector. Course of infections through time.
+#' @param noise List specifying the type of noise and its parameters, if applicable.
+#'
+#' @name simulate
+NULL
+
+#' Methods available for each module
+#'
+#' @param smoothing_method string. Method used to smooth the original incidence data.
+#' Available options are:
+#' \itemize{
+#' \item{'LOESS', implemented in \code{\link{.smooth_LOESS}}}
+#' }
+#' @param deconvolution_method string. Method used to infer timings of infection
+#' events from the original incidence data (aka deconvolution step).
+#' Available options are:
+#' \itemize{
+#' \item{'Richardson-Lucy delay distribution',
+#' implemented in \code{\link{.deconvolve_incidence_Richardson_Lucy}}}
+#' }
+#' @param estimation_method string. Method used to estimate reproductive number
+#' values through time from the reconstructed infection timings.
+#' Available options are:
+#' \itemize{
+#' \item{'EpiEstim sliding window',
+#' implemented in \code{\link{.estimate_Re_EpiEstim_sliding_window}}}
+#' \item{'EpiEstim piecewise constant',
+#' implemented in \code{\link{.estimate_Re_EpiEstim_piecewise_constant}}}
+#' }
+#' @param uncertainty_summary_method string. One of the following options:
+#' \itemize{
+#' \item{'NONE' if no summary of bootstrap estimates is required}
+#' \item{'original estimate - CI from bootstrap estimates'.
+#' The confidence interval is built using bootstrapped estimates
+#' and centered around the original estimates.}
+#' \item{'bagged mean - CI from bootstrap estimates'.
+#' The confidence interval is built using bootstrapped estimates
+#' and centered around the mean of bootstrapped estimates and original estimates.}
+#' }
+#' @param bootstrapping_method string. Method to perform bootstrapping
+#' of the original incidence data.
+#' Available options are:
+#' \itemize{
+#' \item{'non-parametric block boostrap',
+#' implemented in \code{\link{.block_bootstrap}}}
+#' }
+#'
+#' @name module_methods
+NULL
+
+#' Distribution
+#'
+#' @param distribution list. Probability distribution specified in list format,
+#' e.g. list(name = ""gamma"", shape = 2, scale = 4).
+#' The \code{distribution} list must contain a 'name' element; this element must be a string and
+#' correspond to one of the types of \code{\link[stats:Distributions]{distributions}} supported in the \link[stats]{Distributions} package.
+#' \code{distribution} must also contain parameters for the specified distribution, in the form '\code{parameter_name=parameter_value}'.
+#' @param vector_a,vector_b,delay_distribution_vector discretized probability distribution vector
+#' @param matrix_a,matrix_b,delay_distribution_matrix discretized delay distribution matrix
+#' @param quantile Value between 0 and 1. Quantile of the distribution.
+#'
+#' @name distribution
+NULL
+
+#' Empirical delay data format
+#'
+#' @details An \code{empirical_delays} dataframe must contain (at least) two columns:
+#' an 'event_date' column of type \code{Date}
+#' and a 'report_delay' column of type \code{numeric}.
+#' Each row represents the recording of a single delay between event and observation.
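+#' As a purely illustrative sketch (hypothetical values), such a dataframe
+#' could be built as:
+#' \preformatted{
+#' empirical_delays <- data.frame(
+#'   event_date = as.Date(c(""2020-02-01"", ""2020-02-01"", ""2020-02-03"")),
+#'   report_delay = c(4, 6, 5)
+#' )
+#' }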
+#' Typically, the 'event' here is the onset of symptoms of the disease of interest,
+#' and the observation can be, for instance, case confirmation, hospital admission,
+#' admission to an ICU, or death, depending on what the incidence data represents.
+#' For a particular row, 'event_date' would then represent, for a single individual,
+#' the date at which symptoms appeared, and 'report_delay' would represent the number
+#' of time steps (as specified by \code{time_step}) until the observation was made
+#' for this same individual.
+#'
+#' @name empirical_delay_data_format
+NULL
+
+#' Uncertainty summary
+#'
+#' @param uncertainty_summary_method One of these options:
+#' \itemize{
+#' \item{'original estimate - CI from bootstrap estimates'.
+#' The confidence interval is built using bootstrapped estimates
+#' and centered around the original estimates.}
+#' \item{'bagged mean - CI from bootstrap estimates'.
+#' The confidence interval is built using bootstrapped estimates
+#' and centered around the mean of bootstrapped estimates and original estimates.}
+#' }
+#' @param original_values Optional. Reference values around which
+#' the uncertainty interval is constructed.
+#' Typically, these are estimates obtained on the original data.
+#' Must be a dataframe with a timestep index column named \code{index_col}
+#' and a value column named \code{value_col}.
+#' The index column must not contain any \code{NA} value.
+#' @param bootstrapped_values Bootstrap
+#' replicates of the original data.
+#' Must be a dataframe in the long format
+#' with a timestep index column named \code{index_col},
+#' a bootstrap replicate index column named \code{bootstrap_id_col},
+#' and a value column named \code{value_col}.
+#' The index column must not contain any \code{NA} value.
+#' @param central_values Values around which the confidence interval will be centered.
+#' Must be a dataframe with a timestep index column named \code{index_col}
+#' and a value column named \code{value_col}.
+#' The index column must not contain any \code{NA} value.
+#' @param alpha Value between 0 and 1. Confidence level of the confidence interval.
+#' @param combine_bootstrap_and_estimation_uncertainties boolean.
+#' Combine uncertainty from Re estimation with uncertainty from the observation process?
+#' If \code{TRUE}, the credible intervals for Re estimates must be passed via \code{Re_HPDs}.
+#' The output credible intervals
+#' will be the union of bootstrapping intervals and Re estimation intervals.
+#' @param Re_HPDs Optional. Credible intervals for Re estimates.
+#' Use only if \code{combine_bootstrap_and_estimation_uncertainties} is \code{TRUE}.
+#' @param value_col string. Name of the column containing values.
+#' @param bootstrap_id_col string. Name of the column containing the bootstrap sample numbering.
+#' Id 0 must correspond to values associated with the original data.
+#' @param index_col string. Name of the index column.
+#' The index tracks which data point in bootstrapped values
+#' corresponds to which data point in the original values.
+#' @param output_value_col string. Name of the output column with estimated values.
+#' @param prefix_up string. Prefix to add to \code{output_value_col}
+#' to name the column containing the upper limit of the confidence interval.
+#' @param prefix_down string. Prefix to add to \code{output_value_col}
+#' to name the column containing the lower limit of the confidence interval.
+#'
+#' @name uncertainty
+NULL
+
+#' Module object utilities
+#'
+#' @param index_col string. Index column to keep track of which data point
+#' in bootstrapped estimates corresponds to which data point in the original estimates.
+#'
+#' @name module_objects
+NULL
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/utils-output.R",".R","8495","263","#' Convert module output object into tibble
+#'
+#' @inherit inner_module
+#' @param output_name string. Name to be given to the \code{values} column.
+#' @param index_col string. Name of the index column included in the output.
+#' If a \code{ref_date} is passed to the function, the result will contain
+#' a \code{date} column instead.
+#' Even so, a value must be given to this argument for internal steps.
+#' @inherit dating
+#'
+#' @example man/examples/make_tibble_from_output.R
+#'
+#' @return tibble
+#' @export
+make_tibble_from_output <- function(output,
+                                    output_name = ""value"",
+                                    index_col = ""idx"",
+                                    ref_date = NULL,
+                                    time_step = ""day"") {
+  .are_valid_argument_values(list(
+    list(output, ""module_input""),
+    list(output_name, ""string""),
+    list(index_col, ""string""),
+    list(ref_date, ""null_or_date""),
+    list(time_step, ""time_step"")
+  ))
+
+  tmp_output <- .get_module_input(output)
+  indices <- seq(from = .get_offset(tmp_output), by = 1, length.out = length(.get_values(tmp_output)))
+
+  formatted_output <- dplyr::tibble(!!index_col := indices, !!output_name := .get_values(tmp_output))
+
+  if (!is.null(ref_date)) {
+    formatted_output <- .add_date_column(
+      estimates = formatted_output,
+      ref_date = ref_date,
+      time_step = time_step,
+      index_col = index_col,
+      keep_index_col = FALSE
+    )
+  }
+
+  return(formatted_output)
+}
+
+
+#' Make empty module output object
+#'
+#' @return empty module output object
+.make_empty_module_output <- function() {
+  return(list(""values"" = NA_real_, ""index_offset"" = 0))
+}
+
+#' Transform a result output of a module into a module output 'object'
+#'
+#' Also takes the offset of the module input object given to the module,
+#' to calculate the offset of the output object.
+#' The new offset is simply (module input offset) + (shift applied during module operations).
+#' The shift applied during module operations is the \code{additional_offset} argument.
+#'
+#' @param results numeric vector containing output of module operations.
+#' @param original_offset integer. Input offset before computations.
+#' @param additional_offset integer. Shift resulting from operations performed in module.
+#'
+#' @inherit module_structure return
+.get_module_output <- function(results, original_offset, additional_offset = 0) {
+  .are_valid_argument_values(list(
+    list(results, ""numeric_vector""),
+    list(original_offset, ""integer""),
+    list(additional_offset, ""integer"")
+  ))
+  if (length(results) == 0) {
+    return(.make_empty_module_output())
+  }
+
+  new_offset <- original_offset + additional_offset
+
+  # Strip leading NAs; each stripped value moves the offset one step forward.
+  while (is.na(results[1])) {
+    results <- results[-1]
+    new_offset <- new_offset + 1
+
+    if (length(results) == 0) {
+      return(.make_empty_module_output())
+    }
+  }
+
+  return(list(""values"" = results, ""index_offset"" = new_offset))
+}
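+
+# Illustrative offset bookkeeping (hypothetical values, not package code):
+# an input with index_offset = 2, passed through a module whose operations
+# shift results one further step towards the future, yields
+#   .get_module_output(results, original_offset = 2, additional_offset = 1)
+# i.e. an output with index_offset = 3; any leading NAs stripped from
+# 'results' increase the offset further.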
+
+#' Add dates column to dataframe.
+#'
+#' @param estimates dataframe. Estimates.
+#' @param keep_index_col boolean. Keep index column in output?
+#' @inherit dating
+#' @inherit uncertainty
+#'
+#' @return estimates dataframe with dates column.
+.add_date_column <- function(estimates,
+                             ref_date,
+                             time_step,
+                             index_col = ""idx"",
+                             keep_index_col = FALSE) {
+  .are_valid_argument_values(list(
+    list(estimates, ""estimates"", index_col),
+    list(ref_date, ""date""),
+    list(time_step, ""time_step""),
+    list(index_col, ""string""),
+    list(keep_index_col, ""boolean"")
+  ))
+
+  dates <- seq.Date(
+    from = ref_date + min(estimates[[index_col]]),
+    by = time_step,
+    along.with = estimates[[index_col]]
+  )
+
+  estimates <- estimates %>%
+    dplyr::arrange(.data[[index_col]]) %>%
+    dplyr::mutate(date = dates)
+
+  estimates <- dplyr::select(estimates, date, tidyselect::everything())
+
+  if (!keep_index_col) {
+    estimates <- estimates %>%
+      dplyr::select(-.data[[index_col]])
+  }
+
+  return(estimates)
+}
+
+#' Merge multiple module outputs into tibble
+#'
+#' Output tibble from list of unsynced module outputs, with an optional date column.
+#' The optional \code{ref_date} argument is the starting date of an input with offset 0.
+#' In general, this will be the date corresponding to the first entry in the original incidence data.
+#' If a reference date is provided with \code{ref_date}, a date column is appended to the tibble,
+#' with sequential dates generated with the time step specified by the \code{time_step} parameter.
+#'
+#' @example man/examples/merge_outputs.R
+#'
+#' @param output_list named list of module output objects.
+#' @param include_index boolean. Include an index column in output?
+#' @param index_col string. If \code{include_index} is \code{TRUE},
+#' an index column named \code{index_col} is added to the output.
+#' @inherit dating
+#'
+#' @return tibble
+#' @export
+merge_outputs <- function(output_list,
+                          ref_date = NULL,
+                          time_step = ""day"",
+                          include_index = is.null(ref_date),
+                          index_col = ""idx"") {
+  .are_valid_argument_values(list(
+    list(ref_date, ""null_or_date""),
+    list(time_step, ""time_step""),
+    list(index_col, ""string""),
+    list(include_index, ""boolean"")
+  ))
+  for (i in seq_along(output_list)) {
+    .are_valid_argument_values(list(list(output_list[[i]], ""module_input"")))
+  }
+
+  tibble_list <- lapply(
+    seq_along(output_list),
+    function(i) {
+      make_tibble_from_output(
+        output = output_list[[i]],
+        output_name = names(output_list)[i],
+        index_col = index_col
+      )
+    }
+  )
+
+  merged_outputs <- plyr::join_all(tibble_list, by = index_col, type = ""full"") %>%
+    dplyr::arrange(.data[[index_col]])
+
+  if (!is.null(ref_date)) {
+    dates <- seq.Date(
+      from = ref_date + min(merged_outputs[[index_col]]),
+      by = time_step,
+      along.with = merged_outputs[[index_col]]
+    )
+    merged_outputs$date <- dates
+    merged_outputs <- dplyr::select(merged_outputs, date, tidyselect::everything())
+  }
+
+  if (!include_index) {
+    merged_outputs <- dplyr::select(merged_outputs, -.data[[index_col]])
+  }
+
+  return(merged_outputs)
+}
+
+#' Simplify output object if possible
+#'
+#' If the offset is 0, return only the vector containing the values.
+#' If the offset is not zero, then \code{output} is returned as is.
+#'
+#' @param output Module output object
+#'
+#' @return numeric vector or module output object.
+.simplify_output <- function(output) {
+  .are_valid_argument_values(list(list(output, ""module_input"")))
+
+  if (.get_offset(output) == 0) {
+    return(.get_values(output))
+  } else {
+    return(output)
+  }
+}
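+
+# Illustrative behaviour (hypothetical values):
+#   .simplify_output(list(values = c(1.2, 3.4), index_offset = 0))
+# returns c(1.2, 3.4), whereas any non-zero index_offset returns the
+# module output object unchanged.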
+
+#' Prettify results of pipe functions by removing leading and trailing NAs
+#'
+#' @param data Module object or dataframe.
+#' If dataframe, must contain a column named \code{index_col}
+#' or \code{date_col} (or both).
+#' @param index_col string. Name of the index column.
+#' @param date_col string. Name of the date column.
+#'
+#' @return The input dataframe without leading and trailing NA rows.
+.prettify_result <- function(data,
+                             index_col = ""idx"",
+                             date_col = ""date"") {
+  .are_valid_argument_values(list(
+    list(index_col, ""string""),
+    list(date_col, ""string"")
+  ))
+
+  if (is.data.frame(data)) {
+    if (!(index_col %in% names(data) || date_col %in% names(data))) {
+      stop(""data argument must contain an index column or date column (or both)."")
+    }
+
+    ref_col <- ifelse(date_col %in% names(data), date_col, index_col)
+
+    # Find the first and last rows that contain a non-NA value
+    first_row_to_keep <- data %>%
+      dplyr::arrange(.data[[ref_col]]) %>%
+      dplyr::filter(dplyr::if_any(!dplyr::any_of(c(index_col, date_col)), ~ !is.na(.))) %>%
+      dplyr::slice_head(n = 1) %>%
+      dplyr::pull(.data[[ref_col]])
+
+    last_row_to_keep <- data %>%
+      dplyr::arrange(dplyr::desc(.data[[ref_col]])) %>%
+      dplyr::filter(dplyr::if_any(!dplyr::any_of(c(index_col, date_col)), ~ !is.na(.))) %>%
+      dplyr::slice_head(n = 1) %>%
+      dplyr::pull(.data[[ref_col]])
+
+    cleaned_data <- data %>%
+      dplyr::filter(
+        .data[[ref_col]] >= first_row_to_keep,
+        .data[[ref_col]] <= last_row_to_keep
+      )
+
+    return(cleaned_data)
+  } else if (is.list(data)) { # This needs to be checked second, because is.list(dataframe) is TRUE.
+    return(.simplify_output(data))
+  } else {
+    stop(""Data must be a list (module output) or dataframe."")
+  }
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/utils-empirical_delays.R",".R","18405","391","#' Get delay entries corresponding to a round number of weeks (months...)
+#'
+#' The main use of this function is to allow comparison with legacy code.
+#'
+#' @inherit empirical_delay_data_format
+#' @inherit delay_empirical
+#'
+#' @return A vector of length at least \code{min_number_cases}, containing
+#' records of delays.
+.get_delays_over_full_time_units <- function(delays,
+                                             date_of_interest,
+                                             num_steps_in_a_unit = 7,
+                                             min_number_cases) {
+  recent_counts <- delays %>%
+    dplyr::arrange(dplyr::desc(.data$event_date)) %>%
+    dplyr::filter(.data$event_date <= date_of_interest)
+
+  if (nrow(recent_counts) < min_number_cases) {
+    first_observation_dates <- delays %>%
+      dplyr::arrange(.data$event_date) %>%
+      dplyr::slice_head(n = min_number_cases) %>%
+      dplyr::pull(.data$event_date)
+
+    max_date <- max(first_observation_dates)
+    num_steps_since_start <- trunc(as.double(max_date - min(delays$event_date), units = ""auto""))
+
+    if ((num_steps_since_start %% num_steps_in_a_unit) == 0) {
+      max_date_with_full_weeks <- max_date
+    } else {
+      max_date_with_full_weeks <- max_date + num_steps_in_a_unit - (num_steps_since_start %% num_steps_in_a_unit)
+    }
+
+    recent_counts_distribution <- delays %>%
+      dplyr::filter(.data$event_date <= max_date_with_full_weeks) %>%
+      dplyr::pull(.data$report_delay)
+  } else {
+    first_observation_dates <- recent_counts %>%
+      dplyr::slice_head(n = min_number_cases) %>%
+      dplyr::pull(.data$event_date)
+
+    min_date <- min(first_observation_dates)
+    num_steps_since_start <- trunc(as.double(date_of_interest - min_date, units = ""auto""))
+
+    if ((num_steps_since_start %% num_steps_in_a_unit) == 0) {
+      min_date_with_full_weeks <- min_date
+    } else {
+      min_date_with_full_weeks <- min_date - num_steps_in_a_unit + (num_steps_since_start %% num_steps_in_a_unit)
+    }
+    recent_counts_distribution <- delays %>%
+      dplyr::filter(
+        .data$event_date >= min_date_with_full_weeks,
+        .data$event_date <= date_of_interest
+      ) %>%
+      dplyr::pull(.data$report_delay)
+  }
+  return(recent_counts_distribution)
+}
+
+
+#' Build matrix of delay distributions through time from empirical delay data.
+#'
+#' This function takes a record of delays between events and their observations
+#' and builds a discretized delay distribution matrix from this record.
+#' The discretized delay distribution matrix
+#' is required for the application of the Richardson-Lucy algorithm.
+#' The main benefit of providing empirical delay data to an \code{estimateR} analysis,
+#' as opposed to specifying a delay as a single distribution
+#' (whether a fitted or empirical distribution), is that the variability of
+#' the delays through time is used to inform the analysis and provide more accurate estimates.
+#' If the average delay has shifted from 5 days to 3 days between the beginning and end
+#' of the epidemic of interest, this will be reflected in the recorded empirical delays
+#' and will be accounted for by \code{estimateR} when estimating the reproductive number.
+#'
+#' The \code{ref_date} argument here will be understood as the date of the first record in the incidence data
+#' for which the empirical delay data will be used.
+#' If \code{ref_date} is not provided, the reference date will be taken as being the earliest date in the
+#' \code{event_date} column of \code{empirical_delays}. In other words, the date of the first record in the incidence data
+#' will be assumed to be the same as the date of the first record in the empirical delay data.
+#' If this is not the case in your analysis, make sure to specify a \code{ref_date} argument.
+#'
+#' @example man/examples/get_matrix_from_empirical_delay_distr.R
+#'
+#' @inherit empirical_delay_data_format
+#'
+#' @inherit delay_empirical
+#' @inheritParams dating
+#' @inheritParams .get_delay_matrix_column
+#'
+#' @return a discretized delay distribution matrix.
+#' @export
+get_matrix_from_empirical_delay_distr <- function(empirical_delays,
+                                                  n_report_time_steps,
+                                                  ref_date = NULL,
+                                                  time_step = ""day"",
+                                                  min_number_cases = NULL,
+                                                  upper_quantile_threshold = 0.99,
+                                                  min_number_cases_fraction = 0.2,
+                                                  min_min_number_cases = 500,
+                                                  fit = ""none"",
+                                                  return_fitted_distribution = FALSE,
+                                                  num_steps_in_a_unit = NULL) {
+  .are_valid_argument_values(list(
+    list(empirical_delays, ""empirical_delay_data""),
+    list(n_report_time_steps, ""positive_integer""),
+    list(ref_date, ""null_or_date""),
+    list(time_step, ""time_step""),
+    list(min_number_cases, ""null_or_int""),
+    list(upper_quantile_threshold, ""numeric_between_zero_one""),
+    list(min_number_cases_fraction, ""numeric_between_zero_one""),
+    list(min_min_number_cases, ""positive_integer""),
+    list(fit, ""delay_matrix_column_fit""),
+    list(return_fitted_distribution, ""boolean""),
+    list(num_steps_in_a_unit, ""null_or_int"")
+  ))
+
+
+  if (is.null(ref_date)) {
+    ref_date <- min(dplyr::pull(empirical_delays, .data$event_date), na.rm = TRUE)
+  }
+
+  all_report_dates <- seq.Date(from = ref_date, by = time_step, length.out = n_report_time_steps)
+
+  # Ignore the delay data that is posterior to the last incidence report date.
+  empirical_delays <- empirical_delays %>%
+    dplyr::filter(.data$event_date <= max(all_report_dates))
+
+  # Set the 'min_number_cases' parameter if not set by the user.
+  # TODO make this 'min_number_cases' depend on the length of the time_series.
+  if (is.null(min_number_cases)) {
+    min_number_cases <- min_number_cases_fraction * nrow(empirical_delays)
+    min_number_cases <- max(min_number_cases, min_min_number_cases)
+  }
+
+  # Find the threshold for right-truncation.
+  # Beyond this threshold, the delay distribution is not varied through time anymore,
+  # because of the fraction of individuals that remain unsampled when nearing the last sampling date.
+  # TODO put the search for threshold_right_truncation in separate utility function
+  delay_counts <- empirical_delays %>%
+    dplyr::select(.data$report_delay) %>%
+    dplyr::group_by(.data$report_delay) %>%
+    dplyr::summarise(counts = dplyr::n(), .groups = ""drop"")
+
+  threshold_right_truncation <- delay_counts %>%
+    dplyr::mutate(cumul_freq = cumsum(.data$counts) / sum(.data$counts)) %>%
+    dplyr::filter(.data$cumul_freq > upper_quantile_threshold) %>%
+    utils::head(n = 1) %>%
+    dplyr::pull(.data$report_delay)
+
+  # We left-pad the time range with a number of time steps corresponding to the initial shift in the deconvolution.
+  # TODO it may be simpler to just do the augmentation during the deconvolution step
+  initial_shift <- ceiling(stats::quantile(empirical_delays$report_delay, probs = 0.99, na.rm = TRUE))[1]
+
+  # Left-pad the dates we are looking at to account for the shift between event dates and observation dates.
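+  # Illustrative parsing of 'time_step' (hypothetical values): a bare unit
+  # such as ""day"" yields by_string = ""-1 day"", while a prefixed unit such
+  # as ""2 days"" yields by_string = ""-2 days"", so that seq.Date() below can
+  # step backwards in time.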
+  time_unit_start <- regexpr(""(day|week|month|quarter|year)"", time_step, ignore.case = TRUE)[1]
+  if (time_unit_start == 1) {
+    by_string <- paste0(""-1 "", time_step)
+  } else { # time_step already contains the number of time steps;
+    # it is assumed that time_step does not contain negative numbers, and it does
+    # contain one of day|week|month|quarter|year
+    by_string <- paste0(""-"", time_step)
+  }
+
+  all_dates <- c(
+    rev(seq.Date(from = ref_date, by = by_string, length.out = initial_shift + 1)),
+    seq.Date(from = ref_date, by = time_step, length.out = n_report_time_steps)[-1]
+  )
+
+  n_time_steps <- n_report_time_steps + initial_shift
+
+  delay_distribution_matrix <- matrix(0, nrow = n_time_steps, ncol = n_time_steps)
+
+  last_varying_col <- dplyr::if_else(n_time_steps > threshold_right_truncation, n_time_steps - threshold_right_truncation, n_time_steps)
+
+  distrib_list <- list() # needed for the test that checks if get_matrix_from_empirical_delay_distr returns a matrix with the expected distributions when using fit = ""gamma""
+
+  # Shuffle rows to get rid of potential biases associated with the ordering
+  # of records that share the same event date.
+  shuffled_delays <- empirical_delays %>%
+    dplyr::slice(sample(1:dplyr::n()))
+
+  # Populate the delay_distribution_matrix by column
+  if (n_time_steps > threshold_right_truncation) {
+    for (i in 1:last_varying_col) {
+      if (is.null(num_steps_in_a_unit)) {
+        # TODO take out in internal function to reduce duplication
+        recent_counts <- shuffled_delays %>%
+          dplyr::arrange(dplyr::desc(.data$event_date)) %>%
+          dplyr::filter(.data$event_date <= all_dates[i])
+
+        if (nrow(recent_counts) >= min_number_cases) {
+          # If enough data points before date of interest,
+          # take most recent observations before this date.
+
+          recent_counts_distribution <- recent_counts %>%
+            dplyr::slice_head(n = min_number_cases) %>%
+            dplyr::pull(.data$report_delay)
+        } else {
+          # Otherwise, take 'min_number_cases' observations,
+          # even after date of interest.
+          recent_counts_distribution <- shuffled_delays %>%
+            dplyr::arrange(.data$event_date) %>%
+            dplyr::slice_head(n = min_number_cases) %>%
+            dplyr::pull(.data$report_delay)
+        }
+      } else {
+        recent_counts_distribution <- .get_delays_over_full_time_units(
+          delays = shuffled_delays,
+          date_of_interest = all_dates[i],
+          num_steps_in_a_unit = num_steps_in_a_unit,
+          min_number_cases = min_number_cases
+        )
+      }
+
+      result <- .get_delay_matrix_column(recent_counts_distribution, fit, col_number = i, n_time_steps, return_fitted_distribution)
+      if (is.list(result)) {
+        distrib_list[[i]] <- result$distribution
+        new_column <- result$column
+      } else {
+        new_column <- result
+      }
+      delay_distribution_matrix[, i] <- new_column
+    }
+  } else { # if n_time_steps <= threshold_right_truncation
+
+    if (is.null(num_steps_in_a_unit)) {
+      recent_counts <- shuffled_delays %>%
+        dplyr::arrange(dplyr::desc(.data$event_date)) %>%
+        dplyr::filter(.data$event_date <= all_dates[1])
+
+      if (nrow(recent_counts) >= min_number_cases) {
+        # If enough data points before date of interest,
+        # take most recent observations before this date.
+
+        recent_counts_distribution <- recent_counts %>%
+          dplyr::slice_head(n = min_number_cases) %>%
+          dplyr::pull(.data$report_delay)
+      } else {
+        # Otherwise, take 'min_number_cases' observations,
+        # even after date of interest.
+        recent_counts_distribution <- shuffled_delays %>%
+          dplyr::arrange(.data$event_date) %>%
+          dplyr::slice_head(n = min_number_cases) %>%
+          dplyr::pull(.data$report_delay)
+      }
+    } else {
+      recent_counts_distribution <- .get_delays_over_full_time_units(
+        delays = shuffled_delays,
+        date_of_interest = all_dates[1],
+        num_steps_in_a_unit = num_steps_in_a_unit,
+        min_number_cases = min_number_cases
+      )
+    }
+
+
+
+    result <- .get_delay_matrix_column(recent_counts_distribution, fit, col_number = 1, n_time_steps, return_fitted_distribution)
+    if (is.list(result)) {
+      distrib_list[[1]] <- result$distribution
+      new_column <- result$column
+    } else {
+      new_column <- result
+    }
+
+    for (i in 0:(last_varying_col - 1)) {
+      delay_distribution_matrix[, i + 1] <- c(rep(0, times = i), new_column[1:(length(new_column) - i)])
+      if (fit == ""gamma"") {
+        distrib_list <- append(distrib_list, distrib_list[length(distrib_list)])
+      }
+    }
+  }
+
+  if (isTRUE(last_varying_col < n_time_steps)) {
+    for (j in 1:threshold_right_truncation) {
+      delay_distribution_matrix[, i + j] <- c(rep(0, times = j), delay_distribution_matrix[1:(nrow(delay_distribution_matrix) - j), i])
+      if (fit == ""gamma"") {
+        distrib_list <- append(distrib_list, distrib_list[length(distrib_list)])
+      }
+    }
+  }
+
+  if (isTRUE(return_fitted_distribution) && fit == ""gamma"") {
+    return(list(matrix = delay_distribution_matrix, distributions = distrib_list))
+  }
+
+  return(delay_distribution_matrix)
+}
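+
+# Illustrative call with toy data (hypothetical values; the maintained,
+# tested example lives in man/examples/get_matrix_from_empirical_delay_distr.R).
+# The `if (FALSE)` wrapper keeps it from running.
+if (FALSE) {
+  toy_delays <- data.frame(
+    event_date = rep(as.Date(""2020-02-01"") + 0:99, each = 5),
+    report_delay = stats::rgamma(500, shape = 2, scale = 3)
+  )
+  D <- get_matrix_from_empirical_delay_distr(
+    empirical_delays = toy_delays,
+    n_report_time_steps = 100,
+    ref_date = as.Date(""2020-02-01"")
+  )
+}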
+
+
+#' Build a specific column of the delay distribution matrix
+#'
+#' @param recent_counts_distribution numeric vector of report delays, as used in \code{get_matrix_from_empirical_delay_distr}.
+#' @param fit string. Can be either ""none"" or ""gamma"". Specifies the type of fitting applied to the computed column.
+#' @param col_number positive integer. The index the computed column has in the delay matrix.
+#' @param N positive integer. Size of the delay matrix.
+#' @param return_fitted_distribution boolean. If TRUE, the function also returns the gamma distribution that was fitted to the respective column.
+#'
+#' @return If \code{return_fitted_distribution = FALSE},
+#' returns the \code{col_number}th column of the delay matrix, based on the vector of report delays given.
+#' If \code{return_fitted_distribution = TRUE}, it returns a list with two elements:
+#' \code{column} - the delay matrix column as described above,
+#' and \code{distribution} - the delay distribution that was fitted to the column.
+.get_delay_matrix_column <- function(recent_counts_distribution, fit = ""none"", col_number, N, return_fitted_distribution = FALSE) {
+  .are_valid_argument_values(list(
+    list(recent_counts_distribution, ""numeric_vector""),
+    list(fit, ""delay_matrix_column_fit""),
+    list(col_number, ""positive_integer""),
+    list(N, ""positive_integer""),
+    list(return_fitted_distribution, ""boolean"")
+  ))
+  i <- col_number
+
+  if (fit == ""gamma"") {
+    gamma_fit <- try(suppressWarnings(fitdistrplus::fitdist(recent_counts_distribution + 1, distr = ""gamma"")), silent = TRUE)
+    if (""try-error"" %in% class(gamma_fit)) {
+      # TODO only output this if verbose output
+      cat("" mle failed to estimate the parameters. Trying method = \""mme\""\n"")
+      gamma_fit <- fitdistrplus::fitdist(recent_counts_distribution + 1, distr = ""gamma"", method = ""mme"")
+      # TODO if none work revert to empirical distribution
+    }
+
+    shape_fit <- gamma_fit$estimate[[""shape""]]
+    scale_fit <- 1 / gamma_fit$estimate[[""rate""]]
+
+    distribution <- list(name = ""gamma"", shape = shape_fit, scale = scale_fit)
+    delay_distr <- build_delay_distribution(distribution, offset_by_one = TRUE)
+  } else { # no fit
+    delay_distr <- graphics::hist(recent_counts_distribution, breaks = seq(0, N, length.out = N + 1), plot = FALSE)
+    delay_distr <- delay_distr$density
+  }
+
+  if (length(delay_distr) < N - i + 1) {
+    delay_distr <- c(delay_distr, rep(0, times = N - i + 1 - length(delay_distr)))
+  }
+  new_column <- c(rep(0, times = i - 1), delay_distr[1:(N - i + 1)])
+
+  if (fit == ""gamma"" && isTRUE(return_fitted_distribution)) {
+    return(list(column = new_column, distribution = distribution))
+  }
+  return(new_column)
+}
+
+
+#' Utility function that generates delay data, assuming a different delay distribution between event and observation for each individual day.
+#' It then generates the delay matrix and computes the RMSE between the parameters of the gamma distributions passed as arguments and the ones recovered from the delay matrix.
+#' The shapes and scales of the gamma distributions are specified as parameters, and the number of timesteps is assumed to be equal to the length of these vectors.
+#'
+#' @param original_distribution_shapes vector. Specifies the shapes for the gamma distributions.
+#' @param original_distribution_scales vector. Specifies the scales for the gamma distributions.
+#' @param nr_distribution_samples integer. Number of cases to sample for each timestep.
+#'
+#' @return A list with the computed RMSE. It has two elements: \code{$shape_rmse} and \code{$scale_rmse}.
+.delay_distribution_matrix_rmse_compute <- function(original_distribution_shapes, original_distribution_scales, nr_distribution_samples = 500) {
+
+  # Create a vector with all dates in the observation interval
+  start_date <- as.Date(""2021/04/01"")
+  time_steps <- length(original_distribution_shapes)
+  end_date <- start_date + time_steps
+  available_dates <- seq(start_date, end_date, by = ""day"")
+
+  # Build the delay data. Events on each individual day are assumed to be observed according to a different gamma distribution, as specified by original_distribution_shapes and original_distribution_scales.
+  sampled_report_delays <- c()
+  report_dates <- as.Date(c())
+  for (i in 1:time_steps) {
+    new_sampled_report_delays <- .sample_from_distribution(list(name = ""gamma"", shape = original_distribution_shapes[i], scale = original_distribution_scales[i]), nr_distribution_samples)
+    sampled_report_delays <- c(sampled_report_delays, new_sampled_report_delays)
+    new_report_dates <- rep(available_dates[i], nr_distribution_samples)
+    report_dates <- c(report_dates, new_report_dates)
+  }
+  delay_data <- dplyr::tibble(event_date = report_dates, report_delay = sampled_report_delays)
+  result <- get_matrix_from_empirical_delay_distr(delay_data, time_steps, fit = ""gamma"", return_fitted_distribution = TRUE)
+
+  delay_matrix <- result$matrix
+  distrib_list <- result$distributions
+
+
+  # Get the shapes and scales of the gamma distributions fitted by the get_matrix_from_empirical_delay_distr function
+  distribution_shapes <- c()
+  distribution_scales <- c()
+
+  for (distribution in distrib_list) {
+    distribution_shapes <- c(distribution_shapes, distribution$shape)
+    distribution_scales <- c(distribution_scales, distribution$scale)
+  }
+
+  # Compute the RMSE between the desired gamma distribution shapes and scales, and the ones obtained by the get_matrix_from_empirical_delay_distr function
+  start_index <- length(distribution_shapes) - length(original_distribution_shapes) + 1
+  shape_rmse <- Metrics::rmse(distribution_shapes[start_index:length(distribution_shapes)], original_distribution_shapes) / mean(original_distribution_shapes)
+  scale_rmse <- Metrics::rmse(distribution_scales[start_index:length(distribution_scales)], original_distribution_scales) / mean(original_distribution_scales)
+
+  return(list(shape_rmse = shape_rmse, scale_rmse = scale_rmse))
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/utils-simulate.R",".R","7953","202","#' Round a value to either its floor or its ceiling, based on a random draw.
+#'
+#' The probability of rounding to the ceiling value is equal to
+#' the difference between the unrounded value and its floor value.
+#'
+#' For instance, 1.3 has 0.3 probability of being rounded to 2,
+#' and 0.7 probability of being rounded to 1.
+#'
+#' @param observations A vector of numeric values.
+#'
+#' @return Randomly-rounded observations: vector of integers.
+.random_round <- function(observations){
+  # We ensure that the returned number of observations is an integer.
+  # But we don't just round, otherwise we would never see values close to zero.
+  rounded_observations <- sapply(observations, function(x) {
+    floor(x) + stats::rbinom(n = 1, size = 1, prob = x - floor(x)) })
+
+  return(rounded_observations)
+}
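+
+# Illustrative behaviour (random, so results vary between calls):
+# .random_round(c(1.3, 2.0)) returns c(1, 2) with probability 0.7
+# and c(2, 2) with probability 0.3.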
+
+
+#' Compute the discretized infectiousness for a particular time step.
+#'
+#' The gamma distribution specified here is not directly the serial interval but the serial interval
+#' minus one. This allows the use of a gamma distribution (otherwise the infectiousness at day 0 could not be handled);
+#' see Cori et al. 2013 for more details.
+#'
+#' @param k Integer. Index of the time step.
+#' @param shapeG shape of the gamma distribution of the serial interval -1.
+#' @param scaleG scale of the gamma distribution of the serial interval -1.
+#'
+#' @return value of the infectiousness profile at time step k.
+.compute_discretized_infectiousness <- function(k, shapeG=2.73, scaleG=1.39) {
+  ### Expression from Cori et al. 2013, Web appendix 11
+  w <- k * stats::pgamma(k, shape=shapeG, scale=scaleG) +
+    (k-2) * stats::pgamma(k-2, shape=shapeG, scale=scaleG) +
+    (-2) * (k-1) * stats::pgamma(k-1, shape=shapeG, scale=scaleG) +
+    shapeG * scaleG * (2 * stats::pgamma(k-1, shape=shapeG+1, scale=scaleG) -
+      stats::pgamma(k-2, shape=shapeG+1, scale=scaleG) -
+      stats::pgamma(k, shape=shapeG+1, scale=scaleG))
+
+  return(w)
+}
+
+#' Compute a discretized infectiousness profile from a serial interval distribution.
+#'
+#' The serial interval is assumed to be gamma-distributed here,
+#' following Cori et al. 2013.
+#'
+#' @inheritParams simulate
+#' @param stopping_threshold Numeric value between 0 and 1.
+#' Threshold on the cumulative sum of the infectiousness profile returned.
+#' There is little reason to change this from its default value.
+#'
+#' @return Discretized infectiousness profile through time,
+#' represented as a numeric vector with the first value being the infectiousness at time step 0
+#' and each subsequent value being the infectiousness on the time step after the previous value.
+.get_infectiousness_profile <- function(mean_SI = 4.8, sd_SI = 2.3, stopping_threshold = 0.999999){
+
+  shapeG <- (mean_SI - 1)^2 / sd_SI^2
+  scaleG <- sd_SI^2 / (mean_SI - 1)
+
+  sum_probability_mass <- 0
+  day <- 0
+  infectiousness_profile <- c()
+  while (sum_probability_mass < stopping_threshold) {
+    infectiousness_profile[day + 1] <- .compute_discretized_infectiousness(day, shapeG, scaleG)
+    sum_probability_mass <- sum_probability_mass + infectiousness_profile[day + 1]
+    day <- day + 1
+  }
+
+  return(infectiousness_profile)
+}
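+
+# Worked check of the mean/sd to shape/scale conversion above
+# (illustrative arithmetic only): with the defaults mean_SI = 4.8 and
+# sd_SI = 2.3,
+#   shapeG = (4.8 - 1)^2 / 2.3^2 = 14.44 / 5.29 ~ 2.73
+#   scaleG = 2.3^2 / (4.8 - 1)   =  5.29 / 3.8  ~ 1.39
+# which matches the default shapeG and scaleG values of
+# .compute_discretized_infectiousness().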
+
+#' Draw the number of infections for a particular time step.
+#'
+#' @inheritParams simulate
+#' @param incidence Numeric vector. Incidence of infections through time before the current time step.
+#' @param day Integer. Index of the current time step.
+#' @param infectiousness_profile Numeric vector. Discretized infectiousness values through time.
+#'
+#' @return Positive integer. Simulated number of infections for the current time step.
+.draw_It <- function(Rt, incidence, day, infectiousness_profile) {
+  if (length(Rt) < day || day < 1) {
+    return(0)
+  }
+
+  compute_element_in_sum <- function(x) {
+    if (x > (length(infectiousness_profile) - 1) || x >= day || x < 1) {
+      return(0)
+    } else {
+      return(incidence[day - x] * infectiousness_profile[x + 1])
+    }
+  }
+  summed_infectiousness <- sum(sapply(1:(day - 1), compute_element_in_sum))
+  sampled_infected_incidence <- stats::rpois(1, Rt[day] * summed_infectiousness)
+  return(sampled_infected_incidence)
+}
+
+#' Compute the number of delayed observations of infections at the current time step.
+#'
+#' @param infections Vector representing infections through time.
+#' @param delay_distribution Numeric vector or matrix. Discretized delay distribution represented as a vector (matrix).
+#' @param day Integer. Index of the current time step.
+#'
+#' @return Integer. Number of observations made on a particular time step.
+.compute_Ot <- function(infections, delay_distribution, day) {
+  if (day < 1) {
+    return(0)
+  }
+
+  if (is.matrix(delay_distribution)) {
+    if (day <= nrow(delay_distribution)) {
+      delay_distribution_vector <- as.vector(delay_distribution[day, day:1])
+    } else {
+      delay_distribution_vector <- c(0)
+    }
+  } else {
+    delay_distribution_vector <- delay_distribution
+  }
+  compute_element_in_sum <- function(x) {
+    if (x > (length(delay_distribution_vector) - 1) || x >= day || x < 0) {
+      return(0)
+    } else {
+      return(infections[day - x] * delay_distribution_vector[x + 1])
+    }
+  }
+  raw_summed_observations <- sum(sapply(0:(day - 1), compute_element_in_sum))
+
+  return(.random_round(raw_summed_observations))
+}
+
+
+#' Add noise to a series of observations.
+#'
+#' @param observations Numeric vector. Series of observations through time.
+#' @param noise List specifying the type of noise and its parameters, if applicable.
+#'
+#' @return Positive integer vector. Noisy observations.
+.add_noise <- function(observations, noise = list(type = 'iid_noise_sd', sd = 1)){
+
+  if (noise$type == 'gaussian'){
+    mult_noise <- stats::rnorm(length(observations), mean = 1, sd = noise$sd)
+    observations = mult_noise * observations
+  }
+
+  if (noise$type == 'iid_noise_sd'){
+    mult_noise <- stats::rnorm(length(observations), mean = 0, sd = noise$sd)
+
+    # y = mu * residual
+    observations = observations * exp(mult_noise) # so the error is iid log-normal
+  }
+
+  if (noise$type == 'autocorrelated'){
+    ar_coeffs <- noise$ar
+    AR_noise <- stats::arima.sim(model = list(ar = ar_coeffs), n = length(observations), sd = noise$sd)
+
+    observations = observations * exp(as.vector(AR_noise))
+  }
+
+  if (noise$type == 'noiseless'){
+    # No noise added.
+    observations = observations
+  }
+
+  observations = .random_round(observations)
+  observations[observations < 0] = 0
+
+  return(observations)
+}
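+
+# Illustrative noise specifications accepted by .add_noise()
+# (names taken from the branches above; the values are made up):
+#   list(type = 'noiseless')
+#   list(type = 'gaussian', sd = 0.1)                    # multiplicative Gaussian noise
+#   list(type = 'iid_noise_sd', sd = 0.1)                # iid log-normal noise
+#   list(type = 'autocorrelated', ar = c(0.2), sd = 0.1) # autocorrelated log-scale noise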
+
+
+#' Generate a list of delay distributions with a gradual transition between two input delay distributions
+#'
+#' The initial and final delay distributions must be parameterized as gamma distributions with shape and scale parameters.
+#' The intermediary distributions are parameterized by scales and shapes that are linear interpolations between the
+#' initial and final shapes and scales.
+#' @param init_delay List. First delay distribution in the output list.
+#' @param final_delay List. Last delay distribution in the output list.
+#' @param n_time_steps Integer. Number of delay distributions in the output list.
+#'
+#' @return List of delay distributions (specified as lists)
+.build_list_of_gradually_changing_delays <- function(init_delay, final_delay, n_time_steps) {
+  # TODO enforce that both delays are distributions specified as lists and are gamma distributions with scale and shape
+
+  init_distrib_name <- init_delay$name
+  final_distrib_name <- final_delay$name
+
+  if (init_distrib_name != final_distrib_name || init_distrib_name != ""gamma"") {
+    stop(""init_delay and final_delay must be two gamma distributions."")
+  }
+
+  gradual_shapes <- seq(from = init_delay$shape, to = final_delay$shape, length.out = n_time_steps)
+  gradual_scales <- seq(from = init_delay$scale, to = final_delay$scale, length.out = n_time_steps)
+
+  list_of_distributions <- lapply(1:n_time_steps, function(i) {
+    return(list(name = ""gamma"", scale = gradual_scales[i], shape = gradual_shapes[i]))
+  })
+
+  return(list_of_distributions)
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/data.R",".R","3165","68","#' Delay between date of onset of symptoms of COVID-19 and date of case confirmation in Hong Kong
+#'
+#' A dataset compiling the delay (in days) between symptom onset and case report
+#' for close to 3'000 individual cases.
+#' This dataset is truncated at its most recent end and spans from January 2020 to July 2020.
+#' This data was aggregated from the publicly-available linelist data for the COVID-19 epidemic in Hong Kong.
+#' The linelist data is published by the Centre for Health Protection in Hong Kong.
+#'
+#' @format A data frame with 2,948 rows and 2 variables:
+#' \describe{
+#' \item{event_date}{date of symptom onset in YYYY-mm-dd format}
+#' \item{report_delay}{number of days between symptom onset and case report}
+#' }
+#' @source \url{https://www.chp.gov.hk}
+""HK_delay_data""
+
+
+#' Incidence data for COVID-19 in Hong Kong
+#'
+#' A dataset containing aggregated incidence data for Hong Kong from January 2020 to July 2021.
+#' This data was put together by aggregating the publicly-available linelist data for SARS-CoV-2 in Hong Kong.
+#' The linelist data is published by the Centre for Health Protection in Hong Kong.
+#'
+#' @format A data frame with 196 rows and 4 variables:
+#' \describe{
+#' \item{date}{date of case reporting in YYYY-mm-dd format}
+#' \item{case_incidence}{total number of case confirmations
+#' reported on this date}
+#' \item{onset_incidence}{number of events of onset of symptoms
+#' occurring on this date}
+#' \item{report_incidence}{number of case confirmations reported on this date
+#' with no known date of onset of symptoms (or asymptomatic cases)}
+#' }
+#' @source \url{https://www.chp.gov.hk}
+""HK_incidence_data""
+
+#' Incidence data for COVID-19 in Estonia
+#'
+#' A dataset containing aggregated incidence data for Estonia from February 2020 to May 2021.
+#' The linelist data is published by the Estonian public health authorities.
+#'
+#' @format A data frame with 460 rows and 2 variables:
+#' \describe{
+#' \item{date}{date of case reporting in YYYY-mm-dd format}
+#' \item{case_incidence}{number of cases reported on this date}
+#' }
+#' @source \url{https://opendata.digilugu.ee/opendata_covid19_tests_total.csv}
+""EST_incidence_data""
+
+#' Synthetic linelist of COVID-19 patients
+#'
+#' A synthetic dataset containing a simulated linelist of Swiss COVID-19 patients from February to June 2020.
+
+#' Synthetic linelist of COVID-19 patients
+#'
+#' A synthetic dataset containing a simulated linelist of Swiss COVID-19 patients from February to June 2020.
+#' The incidence that can be derived from aggregating this synthetic linelist
+#' corresponds to, or closely matches, the
+#' incidence numbers reported for Switzerland by the Federal Office of Public Health.
+#'
+#' In this simulated dataset, each row refers to a particular patient.
+#' The 'confirmation_date' column refers to the date at which a positive COVID-19 test is reported.
+#' For each patient, with a probability < 1, a date of onset of symptoms was drawn from a probability distribution.
+#' Otherwise, the date of onset of symptoms was left as NA.
+#'
+#' @format A data frame with 31'950 rows and 2 columns:
+#' \describe{
+#'   \item{confirmation_date}{date of case confirmation in YYYY-mm-dd format}
+#'   \item{symptom_onset_date}{If simulated, date of onset of symptoms in YYYY-mm-dd format}
+#' }
+""CH_simulated_linelist""
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/utils-nowcast.R",".R","6201","125","#' Correct incidence data for yet-to-be-observed fraction of events
+#'
+#' Use this function to correct the tail of an incidence time series
+#' when the incidence was only recorded for cases that went through a subsequent observation event.
+#' For instance, if the incidence represents people starting to show symptoms of a disease
+#' (dates of onset of symptoms), the data would typically have been collected among
+#' individuals whose case was confirmed via a test.
+#' If so, among all events of onset of symptoms, only those that had time to be
+#' confirmed by a test were reported.
+#' Thus, close to the present, there is an under-reporting of onset-of-symptoms events.
+#' To account for this effect, this function divides each incidence value
+#' by the probability that an event happening at that time step has already been observed.
+#' Typically, this correction only affects the few most recent data points.
+#'
+#' A trimming is done at the tail of the time series to avoid correcting for time steps
+#' for which the observation probability is too low, which could result in overly uncertain corrected values.
+#' This trimming is tuned via the \code{cutoff_observation_probability} argument.
+#'
+#' The \code{gap_to_present} argument represents the number of time steps truncated off the right end of the raw data.
+#' If no truncation was done, \code{gap_to_present} should be kept at its default value of 0.
+#' A truncation can be done when the latest reported numbers are too unreliable: in a monitoring situation, e.g.,
+#' the latest X days of data can be deemed not worth keeping if they are not well consolidated.
+#' An alternative to this truncation is to nowcast the observed incidence using this function and a delay
+#' distribution representing the consolidation delay.
+#' Contrary to best-practice nowcasting methods, this function only provides a maximum-likelihood estimator
+#' of the actual incidence; it does not include uncertainty around this estimator.
+#'
+#'
+#' The \code{ref_date} argument is only needed if the \code{delay_until_final_report}
+#' is passed as a dataframe of individual delay observations (a.k.a. empirical delay data).
+#' In that case, \code{ref_date} must correspond to the date of the first time step in \code{incidence_data}.
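+#'
+#' For example: if 50%, 30%, 10% and 10% of events are observed with a delay of
+#' 0, 1, 2 and 3 time steps respectively, the last incidence value is divided
+#' by 0.5, the second-to-last by 0.8 and the third-to-last by 0.9, while all
+#' earlier values are left unchanged.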
+#' +#' @example man/examples/nowcast.R +#' +#' @inherit module_structure +#' @inherit delay_high +#' @inherit dating +#' @inheritDotParams get_matrix_from_empirical_delay_distr -empirical_delays -n_report_time_steps -return_fitted_distribution +#' +#' @export +nowcast <- function(incidence_data, + delay_until_final_report, + cutoff_observation_probability = 0.33, + gap_to_present = 0, + ref_date = NULL, + time_step = ""day"", + ...) { + .are_valid_argument_values(list( + list(incidence_data, ""module_input""), + list(delay_until_final_report, ""delay_single_or_list"", .get_input_length(incidence_data)), + list(cutoff_observation_probability, ""numeric_between_zero_one""), + list(ref_date, ""null_or_date""), + list(time_step, ""time_step"") + )) + + input <- .get_module_input(incidence_data) + incidence_vector <- .get_values(input) + n_time_steps <- length(incidence_vector) + gap_to_present + + dots_args <- .get_dots_as_list(...) + + delay_distribution_final_report <- do.call( + ""convolve_delays"", + c( + list( + delay = delay_until_final_report, + n_report_time_steps = n_time_steps, + ref_date = ref_date, + time_step = time_step + ), + .get_shared_args(list( + convolve_delays, + build_delay_distribution, + get_matrix_from_empirical_delay_distr + ), dots_args) + ) + ) + + if (NCOL(delay_distribution_final_report) == 1) { + # delay_distribution_final_report is a vector, we build a delay distr matrix from it + delay_distribution_matrix_final_report <- .get_delay_matrix_from_delay_distributions(delay_distribution_final_report, + N = n_time_steps + ) + } else { + # delay_distribution_final_report is a matrix, we truncate off the extra initial columns (required for R-L algo only) + initial_offset <- ncol(delay_distribution_final_report) - n_time_steps + 1 + delay_distribution_matrix_final_report <- delay_distribution_final_report[ + initial_offset:nrow(delay_distribution_final_report), + initial_offset:ncol(delay_distribution_final_report) + ] + } + + Q_vector_observation_to_final_report <- apply(delay_distribution_matrix_final_report, MARGIN = 2, sum) + + # TODO improve this error + # if (any(is.na(Q_vector_observation_to_final_report)) || isTRUE(any(Q_vector_observation_to_final_report == 0, na.rm = FALSE))) { + if (any(is.na(Q_vector_observation_to_final_report))) { + warning(""Invalid delay_until_final_report argument."") + } + # TODO need to make sure that the matrix is the same size (as opposed to having extra columns leading) + # TODO test with matrix delay + # TODO we need to send an error if non zero incidence but zero probability of observation probably need a minimum value to replace zeroes by + # TODO fix incidence vector if NaN or Inf values (there is cutoff anyway) + + # Remove the trailing 'gap_to_present' elements from Q. 
+ Q_vector_observation_to_final_report <- Q_vector_observation_to_final_report[1:length(incidence_vector)] + + incidence_vector <- incidence_vector / Q_vector_observation_to_final_report + + # Now we cut off values at the end of the time series, + # those dates for which the probability of having observed an event that happened on that date is too low + # We define 'too low' as being below a 'cutoff_observation_probability' + tail_values_below_cutoff <- which(rev(Q_vector_observation_to_final_report) < cutoff_observation_probability) + + if (length(tail_values_below_cutoff) == 0) { + cutoff <- 0 + } else { + cutoff <- max(tail_values_below_cutoff) + } + + truncated_incidence_vector <- incidence_vector[1:(length(incidence_vector) - cutoff)] + + return(.get_module_output(truncated_incidence_vector, .get_offset(input))) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/utils-convolution.R",".R","10223","284","#' Convolve two discretized probability distribution vectors. +#' +#' @inheritParams distribution +#' +#' @return discretized probability distribution vector +.convolve_delay_distribution_vectors <- function(vector_a, vector_b) { + .are_valid_argument_values(list( + list(vector_a, ""probability_distr_vector""), + list(vector_b, ""probability_distr_vector"") + )) + + # Right-pad vectors with zeroes to bring them to the same length + final_length <- length(vector_b) + length(vector_a) + vector_a <- c(vector_a, rep(0, times = final_length - length(vector_a))) + vector_b <- c(vector_b, rep(0, times = final_length - length(vector_b))) + + # Initialize result vector + vector_c <- rep(0, times = final_length) + + # Fill result vector + for (i in 1:final_length) { + reversed_vector_b <- rev(vector_b[1:i]) # Reverse vector_b truncated at index i + reversed_vector_b <- c(reversed_vector_b, rep(0, times = final_length - i)) # Right-pad with zeroes + vector_c[i] <- vector_a %*% reversed_vector_b # Compute dot product between vectors + } + + return(vector_c) +} + +#' Convolve a delay distribution vector with a delay distribution matrix +#' +#' @inheritParams distribution +#' @param vector_first Does the delay described by \code{vector_a} happen before +#' the one from \code{matrix_b}? 
+#' +#' @return discretized delay distribution matrix +.convolve_delay_distribution_vector_with_matrix <- function(vector_a, matrix_b, vector_first = TRUE) { + .are_valid_argument_values(list( + list(vector_a, ""probability_distr_vector""), + list(matrix_b, ""probability_distr_matrix"", 0), + list(vector_first, ""boolean"") + )) + if (vector_first) { + # Increase size of matrix_b to account for the fact that the output matrix will be shifted in time by the vector_a delay + n_col_augment <- .get_time_steps_quantile(vector_a, quantile = 0.5) + matrix_b <- .left_augment_delay_distribution( + delay_distribution_matrix = matrix_b, + n_col_augment = n_col_augment + ) + } + + N <- nrow(matrix_b) + # Right-pad vector with zeroes to bring to same dimension as square matrix + vector_a <- c(vector_a, rep(0, times = max(0, N - length(vector_a)))) + + # Initialize result matrix + convolved_matrix <- matrix(0, nrow = N, ncol = N) + + # Iterate over columns (each column represents the delay distribution on a specific date) + for (j in 1:N) { + # Iterate over rows + for (i in 0:(N - j)) { + if (vector_first) { # Take corresponding row in matrix_b + # The row is left-truncated (only j to N indices) so as to start at same + # date (date with index j) as column in convolved matrix + matrix_b_elements <- matrix_b[i + j, j:(j + i)] + } else { # Take corresponding column in matrix_b (and revert it) + matrix_b_elements <- matrix_b[(i + j):j, j] + } + + truncated_vector_a <- vector_a[1:(i + 1)] + convolved_matrix[i + j, j] <- truncated_vector_a %*% matrix_b_elements + } + } + + return(convolved_matrix) +} + +# TODO if used, need to consider if left augmentation is required like for .convolve_delay_distribution_vector_with_matrix +#' Convolve two delay distribution matrices +#' +#' Note that this convolution operation is not commutative! +#' The order matters: here the delay implied by \code{matrix_a} happens first, +#' then the one implied by \code{matrix_b}. 
+#' @inheritParams distribution +#' +#' @return convolved discretized delay distribution matrix +.convolve_delay_distribution_matrices <- function(matrix_a, matrix_b) { + stop(""This function is not ready.\n + Need to consider if left augmentation is required like for .convolve_delay_distribution_vector_with_matrix"") + .are_valid_argument_values(list( + list(matrix_a, ""probability_distr_matrix"", 0), + list(matrix_b, ""probability_distr_matrix"", 0) + )) + + if (nrow(matrix_a) != nrow(matrix_b)) { + stop(""Convolved matrices must have the same dimensions."") + } + + N <- nrow(matrix_a) + # Initialize result matrix + convolved_matrix <- matrix(0, nrow = N, ncol = N) + + # Iterate over columns (each column represents the delay distribution on a specific date) + for (j in 1:N) { + # Iterate over rows + for (i in 0:(N - j)) { + + # Take truncated column of matrix_a (first delay applied) + matrix_a_elements <- matrix_a[j:(j + i), j] + # Take truncated row of matrix_b (second delay applied) + matrix_b_elements <- matrix_b[i + j, j:(j + i)] + + convolved_matrix[i + j, j] <- matrix_a_elements %*% matrix_b_elements + } + } + return(convolved_matrix) +} + +#' Convolve a list of delay vectors or matrices +#' +#' @param delay_distributions list of discretized delay distribution vector or matrix +#' +#' @return discretized delay distribution vector (if all input delays are +#' vectors) or matrix +.convolve_delay_distributions <- function(delay_distributions) { + .are_valid_argument_values(list( + # We put '1' here, because we do not care here about checking the dimension of the matrix. + list(delay_distributions, ""delay_single_or_list"", 1) + )) + + number_of_delays_in_list <- length(delay_distributions) + + if (number_of_delays_in_list == 1) { + return(delay_distributions[[1]]) + } + + if (number_of_delays_in_list == 2) { + return(.convolve_two_delay_distributions(delay_distributions[[1]], delay_distributions[[2]])) + } + + last_delay <- delay_distributions[[number_of_delays_in_list]] + + delay_distributions[number_of_delays_in_list] <- NULL + convolved_without_last_delay <- .convolve_delay_distributions(delay_distributions) + + return(.convolve_two_delay_distributions(convolved_without_last_delay, last_delay)) +} + + +#' Convolve two delay vectors or matrices +#' +#' @param first_delay discretized delay distribution vector or matrix +#' @param second_delay discretized delay distribution vector or matrix +#' +#' @return discretized delay distribution vector (if both input delays are +#' vectors) or matrix +.convolve_two_delay_distributions <- function(first_delay, + second_delay) { + .are_valid_argument_values(list( + list(first_delay, ""delay_object"", 1), + list(second_delay, ""delay_object"", 1) + )) # + + if (.is_numeric_vector(first_delay)) { + if (.is_numeric_vector(second_delay)) { + return(.convolve_delay_distribution_vectors(first_delay, second_delay)) + } else if (is.matrix(second_delay)) { + return(.convolve_delay_distribution_vector_with_matrix( + vector_a = first_delay, + matrix_b = second_delay, + vector_first = TRUE + )) + } else { + stop(""'second_delay' must be a numeric vector or matrix."") + } + } else if (is.matrix(first_delay)) { + if (.is_numeric_vector(second_delay)) { + return(.convolve_delay_distribution_vector_with_matrix( + matrix_b = first_delay, + vector_a = second_delay, + vector_first = FALSE + )) + } else if (is.matrix(second_delay)) { + # TODO work on .convolve_delay_distribution_matrices() + stop(""Convolution of two matrices is not available."") + # 
return(.convolve_delay_distribution_matrices(first_delay, second_delay))
+    } else {
+      stop(""'second_delay' must be a numeric vector or matrix."")
+    }
+  } else {
+    stop(""'first_delay' must be a numeric vector or matrix."")
+  }
+}
+
+#' Convolve delay distributions
+#'
+#' Take a list of delay distributions and return their convolution.
+#' The convolution of a delay A -> B and a delay B -> C corresponds to the
+#' delay distribution of A -> C.
+#' Delays are assumed to happen in the same chronological order as the order
+#' they are given in the \code{delays} list.
+#'
+#' This function is flexible in the type of delay inputs it can handle.
+#' Each delay in the \code{delays} list can be one of:
+#' \itemize{
+#' \item{a list representing a distribution object}
+#' \item{a discretized delay distribution vector}
+#' \item{a discretized delay distribution matrix}
+#' \item{a dataframe containing empirical delay data}
+#' }
+#'
+#' See \code{\link{get_matrix_from_empirical_delay_distr}} for details on the format
+#' expected for the empirical delay data.
+#'
+#' @example man/examples/convolve_delays.R
+#'
+#' @inherit delay_high
+#' @param n_report_time_steps integer. Length of incidence time series.
+#' Use only when providing empirical delay data.
+#' @inheritParams dating
+#' @inheritDotParams get_matrix_from_empirical_delay_distr -empirical_delays -return_fitted_distribution
+#' @inheritDotParams build_delay_distribution -distribution
+#'
+#' @return a discretized delay distribution vector or matrix.
+#' A vector is returned when input delay distributions are constant through time:
+#' either they are vectors already or in the form of a list-specified distribution.
+#' A matrix is returned when at least one of the delays has a delay distribution
+#' that can change through time. This is the case with empirical delay data
+#' or if any of the inputs is already a delay distribution matrix.
+#'
+#' @export
+convolve_delays <- function(delays,
+                            n_report_time_steps = NULL,
+                            ...) {
+  .are_valid_argument_values(list(
+    # We put '1' here because we do not care about checking the dimension of the matrix.
+    list(delays, ""delay_single_or_list"", 1),
+    list(n_report_time_steps, ""null_or_int"")
+  ))
+
+
+  dots_args <- .get_dots_as_list(...)
+
+  if (.is_single_delay(delays)) {
+    delay_distribution <- do.call(
+      "".get_delay_distribution"",
+      c(
+        list(
+          delay = delays,
+          n_report_time_steps = n_report_time_steps
+        ),
+        .get_shared_args(list(
+          get_matrix_from_empirical_delay_distr,
+          build_delay_distribution
+        ), dots_args)
+      )
+    )
+    return(delay_distribution)
+  } else {
+    delay_distributions <- lapply(
+      delays,
+      function(delay) {
+        delay_distribution <- do.call(
+          "".get_delay_distribution"",
+          c(
+            list(
+              delay = delay,
+              n_report_time_steps = n_report_time_steps
+            ),
+            .get_shared_args(list(
+              get_matrix_from_empirical_delay_distr,
+              build_delay_distribution
+            ), dots_args)
+          )
+        )
+      }
+    )
+
+    return(.convolve_delay_distributions(delay_distributions))
+  }
+}
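+
+# Worked example (an illustrative sketch based on the definitions above, not
+# code used by the package): convolving the pmf of a delay A -> B with the
+# pmf of a delay B -> C yields the pmf of the total delay A -> C.
+#   pmf_a <- c(0.5, 0.5) # delay of 0 or 1 time step, equally likely
+#   pmf_b <- c(0.2, 0.8) # delay of 0 or 1 time step
+#   convolve_delays(list(pmf_a, pmf_b))
+#   # expected: c(0.1, 0.5, 0.4, 0), i.e. a total delay of 0, 1 or 2 steps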
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/estimate_Re.R",".R","15353","400","#' Estimate the effective reproductive number Re through time from incidence data
+#'
+#' \code{estimate_Re()} takes the number of infections through time
+#' and computes the Re value through time (also known as Rt).
+#'
+#' The incidence input should represent infections,
+#' as opposed to representing delayed observations of infections.
+#'
+#' If the incidence data represents delayed observations of infections,
+#' one should first reconstruct the incidence of infections using
+#' \code{deconvolve_incidence()}, or \code{get_infections_from_incidence()},
+#' which wraps around \code{deconvolve_incidence()} and adds a smoothing step for the delayed observations.
+#'
+#' \code{estimate_Re()} wraps around the \code{estimate_R()} function
+#' of the \code{EpiEstim} package from Cori et al., 2013.
+#' \code{estimate_Re()} allows for two types of Re estimation:
+#' \enumerate{
+#' \item A sliding-window estimation.
+#' For each time step T, the Re(T) value is computed by assuming that Re is constant
+#' for time steps (T-X+1, ..., T-1, T), with X being the sliding window size.
+#' This option is chosen by setting \code{estimation_method = ""EpiEstim sliding window""}
+#' \item A piecewise-constant estimation.
+#' Re(t) is computed as being a piecewise-constant function of time.
+#' The length of each step can be a fixed number of time steps.
+#' That number is specified using the \code{interval_length} parameter.
+#' The length of each step can also be irregular.
+#' This can be useful if the boundaries of the steps are meant to coincide with particular events
+#' such as the implementation or cancellation of public health interventions.
+#' The right boundaries of the steps are specified using the \code{interval_ends} parameter.
+#' This option is chosen by setting \code{estimation_method = ""EpiEstim piecewise constant""}
+#' }
+#'
+#' @example man/examples/estimate_Re.R
+#'
+#' @param simplify_output boolean. Simplify the output when possible?
+#' @inheritParams module_methods
+#' @inheritParams module_structure
+#' @inheritDotParams .estimate_Re_EpiEstim_sliding_window -incidence_input
+#' @inheritDotParams .estimate_Re_EpiEstim_piecewise_constant -incidence_input
+#'
+#' @return A list with two elements:
+#' \enumerate{
+#' \item A numeric vector named \code{values}: the result of the computations on the input data.
+#' \item An integer named \code{index_offset}: the offset, counted in number of time steps,
+#' by which the result is shifted compared to an \code{index_offset} of \code{0}.
+#' This parameter allows one to keep track of the date of the first value in \code{values}
+#' without needing to carry a \code{date} column around.
+#' A positive offset means \code{values} start later than the reference values;
+#' a negative offset means the opposite.
+#' Note that the \code{index_offset} of the output of the function call
+#' accounts for the (optional) \code{index_offset} of the input.
+#' }
+#' If \code{index_offset} is \code{0} and \code{simplify_output = TRUE},
+#' the \code{index_offset} is dropped and the \code{values}
+#' element is returned as a numeric vector.
+#'
+#' If \code{output_HPD = TRUE} (additional parameter),
+#' the highest posterior density interval boundaries are output along with the mean Re estimates.
+#' In that case, a list of three lists is returned:
+#' \itemize{
+#' \item \code{Re_estimate} contains the Re estimates.
+#' \item \code{Re_highHPD} and \code{Re_lowHPD} contain
+#' the higher and lower boundaries of the HPD interval,
+#' as computed by \code{\link[EpiEstim]{estimate_R}}.
+#' }
+#' If, in addition, \code{simplify_output = TRUE},
+#' then the three elements are merged into a single dataframe by \code{merge_outputs()}.
+#' A date column can be added to the dataframe by passing an extra \code{ref_date} argument
+#' (see \code{\link{merge_outputs}} for details).
+#'
+#' @seealso \code{\link{smooth_incidence}}, \code{\link{deconvolve_incidence}}
+#' @export
+estimate_Re <- function(incidence_data,
+                        estimation_method = ""EpiEstim sliding window"",
+                        simplify_output = FALSE,
+                        ...) {
+  .are_valid_argument_values(list(
+    list(incidence_data, ""module_input""),
+    list(estimation_method, ""estimation_method""),
+    list(simplify_output, ""boolean"")
+  ))
+
+
+  dots_args <- .get_dots_as_list(...)
+  input <- .get_module_input(incidence_data)
+
+  if (estimation_method == ""EpiEstim sliding window"") {
+    Re_estimate <- do.call(
+      "".estimate_Re_EpiEstim_sliding_window"",
+      c(
+        list(incidence_input = input),
+        .get_shared_args(.estimate_Re_EpiEstim_sliding_window, dots_args)
+      )
+    )
+  } else if (estimation_method == ""EpiEstim piecewise constant"") {
+    Re_estimate <- do.call(
+      "".estimate_Re_EpiEstim_piecewise_constant"",
+      c(
+        list(incidence_input = input),
+        .get_shared_args(.estimate_Re_EpiEstim_piecewise_constant, dots_args)
+      )
+    )
+  } else {
+    Re_estimate <- .make_empty_module_output()
+  }
+
+  if (simplify_output) {
+    if (.is_list_of_outputs(Re_estimate)) {
+      Re_estimate <- do.call(
+        ""merge_outputs"",
+        c(
+          list(output_list = Re_estimate),
+          .get_shared_args(merge_outputs, dots_args)
+        )
+      )
+    } else {
+      Re_estimate <- .simplify_output(Re_estimate)
+    }
+  }
+
+  return(Re_estimate)
+}
+
+#' Estimate Re with EpiEstim using a sliding window
+#'
+#' The Re value reported for time step T corresponds to the value estimated
+#' when assuming that Re is constant over e.g. (T-3, T-2, T-1, T),
+#' for a sliding window of 4 time steps.
+#'
+#' @param estimation_window Use with \code{estimation_method = ""EpiEstim sliding window""}.
+#' Positive integer value.
+#' Number of data points over which to assume Re to be constant.
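+#' For example, with \code{estimation_window = 3}, the estimate reported for
+#' time step T assumes Re to be constant over the time steps (T-2, T-1, T).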
+#' @inheritParams inner_module +#' @inherit EpiEstim_wrapper +.estimate_Re_EpiEstim_sliding_window <- function(incidence_input, + import_incidence_input = NULL, + minimum_cumul_incidence = 12, + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + method = ""parametric_si"", + si_distr = c(0,1), + mean_Re_prior = 1, + output_HPD = FALSE) { + .are_valid_argument_values(list( + list(incidence_input, ""module_input""), + list(minimum_cumul_incidence, ""non_negative_number""), + list(estimation_window, ""positive_integer""), + list(mean_serial_interval, ""positive_number""), + list(std_serial_interval, ""non_negative_number""), + list(mean_Re_prior, ""positive_number"") + )) + + if (method == ""non_parametric_si"") { + mean_serial_interval <- weighted.mean(1:length(si_distr), w = si_distr) + } + + incidence_vector <- .get_values(incidence_input) + if (sum(incidence_vector) < minimum_cumul_incidence) { + stop(""minimum_cumul_incidence parameter is set higher than total cumulative incidence."") + } + + if (!is.null(import_incidence_input)) { + .are_valid_argument_values(list( + list(import_incidence_input, ""module_input"") + )) + + incidence <- merge_outputs( + output_list = list( + local = incidence_input, + imported = import_incidence_input + ), + include_index = FALSE + ) %>% + tidyr::replace_na(list(local = 0, imported = 0)) + + incidence_length <- nrow(incidence) + input_offset <- min(.get_offset(incidence_input), .get_offset(import_incidence_input)) + } else { + incidence <- incidence_vector + incidence_length <- length(incidence) + input_offset <- .get_offset(incidence_input) + } + + offset <- which(cumsum(incidence_vector) >= minimum_cumul_incidence)[1] + # We use the criteria on the offset from Cori et al. 2013 + # (and offset needs to be at least two for EpiEstim) + offset <- max(estimation_window, ceiling(mean_serial_interval), offset, 2) + + right_bound <- incidence_length - (estimation_window - 1) + + if (is.na(offset) | right_bound < offset) { + stop(""Cumulative incidence observed in available data window too small."") + } + + # Computation intervals corresponding to every position of the sliding window + t_start <- seq(offset, right_bound) + t_end <- t_start + estimation_window - 1 + + if (method == ""parametric_si"") { + R_instantaneous <- suppressWarnings(EpiEstim::estimate_R( + incid = incidence, + method = ""parametric_si"", + config = EpiEstim::make_config( + list( + mean_si = mean_serial_interval, + std_si = std_serial_interval, + t_start = t_start, + t_end = t_end, + mean_prior = mean_Re_prior + ) + ) + )) + } else if (method == ""non_parametric_si"") { + R_instantaneous <- suppressWarnings(EpiEstim::estimate_R( + incid = incidence, + method = ""non_parametric_si"", + config = EpiEstim::make_config( + list( + si_distr = si_distr, + t_start = t_start, + t_end = t_end, + mean_prior = mean_Re_prior + ) + ) + )) + } else { + stop(""Supplied method for serial interval representation not supported by .estimate_Re_EpiEstim_sliding_window"") + } + + additional_offset <- t_end[1] - 1 + Re_estimate <- .get_module_output( + R_instantaneous$R$`Mean(R)`, + input_offset, + additional_offset + ) + if (output_HPD) { + Re_highHPD <- .get_module_output( + R_instantaneous$R$`Quantile.0.975(R)`, + input_offset, + additional_offset + ) + + Re_lowHPD <- .get_module_output( + R_instantaneous$R$`Quantile.0.025(R)`, + input_offset, + additional_offset + ) + + return(list( + Re_estimate = Re_estimate, + Re_highHPD = Re_highHPD, + Re_lowHPD = Re_lowHPD + )) + } else 
{ + return(Re_estimate) + } +} + +#' Estimate Re with EpiEstim in a piecewise-constant fashion +#' +#' This function returns piecewise-constant Re estimates. +#' +#' @param interval_ends Use with \code{estimation_method = ""EpiEstim piecewise constant""} +#' Integer vector. Optional argument. +#' If provided, \code{interval_ends} overrides the \code{interval_length} argument. +#' Each element of \code{interval_ends} specifies the right boundary +#' of an interval over which Re is assumed to be constant for the calculation. +#' Values in \code{interval_ends} must be integer values corresponding +#' with the same numbering of time steps as given by \code{incidence_input}. +#' In other words, \code{interval_ends} and \code{incidence_input}, +#' use the same time step as the zero-th time step. +#' @param interval_length Use with \code{estimation_method = ""EpiEstim piecewise constant""} +#' Positive integer value. +#' Re is assumed constant over steps of size \code{interval_length}. +#' +#' @inheritParams inner_module +#' @inherit EpiEstim_wrapper +#' +.estimate_Re_EpiEstim_piecewise_constant <- function(incidence_input, + import_incidence_input = NULL, + minimum_cumul_incidence = 12, + interval_ends = NULL, + interval_length = 7, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1, + output_HPD = FALSE) { + .are_valid_argument_values(list( + list(incidence_input, ""module_input""), + list(minimum_cumul_incidence, ""non_negative_number""), + list(interval_length, ""positive_integer""), + list(mean_serial_interval, ""number""), + list(std_serial_interval, ""non_negative_number""), + list(mean_Re_prior, ""number"") + )) + + incidence_vector <- .get_values(incidence_input) + + if (sum(incidence_vector) < minimum_cumul_incidence) { + stop(""minimum_cumul_incidence parameter is set higher than total cumulative incidence."") + } + + if (!is.null(import_incidence_input)) { + .are_valid_argument_values(list( + list(import_incidence_input, ""module_input"") + )) + + incidence <- merge_outputs( + output_list = list( + local = incidence_input, + imported = import_incidence_input + ), + include_index = FALSE + ) %>% + tidyr::replace_na(list(local = 0, imported = 0)) + + incidence_length <- nrow(incidence) + input_offset <- min(.get_offset(incidence_input), .get_offset(import_incidence_input)) + } else { + incidence <- incidence_vector + incidence_length <- length(incidence) + input_offset <- .get_offset(incidence_input) + } + + offset <- which(cumsum(incidence_vector) >= minimum_cumul_incidence)[1] + # offset needs to be at least two for EpiEstim + offset <- max(2, offset) + right_bound <- incidence_length + + if (!is.null(interval_ends)) { + .are_valid_argument_values(list( + list(interval_ends, ""integer_vector"") + )) + + # we make these be relative to input_offset + interval_ends <- sort(interval_ends - input_offset) + + interval_ends <- interval_ends[interval_ends > offset & interval_ends <= right_bound] + } else { + interval_ends <- seq(from = offset + interval_length - 1, to = right_bound, by = interval_length) + if (max(interval_ends) < right_bound) { + interval_ends <- c(interval_ends, right_bound) + } + } + + if (length(interval_ends) < 1) { + stop(""No valid interval to estimate Re on. 
+          Check 'minimum_cumul_incidence', 'interval_ends' or 'interval_length' parameters."")
+  }
+
+  interval_starts <- c(offset, interval_ends[-length(interval_ends)] + 1)
+
+  R_instantaneous <- suppressWarnings(EpiEstim::estimate_R(
+    incid = incidence,
+    method = ""parametric_si"",
+    config = EpiEstim::make_config(
+      list(
+        mean_si = mean_serial_interval,
+        std_si = std_serial_interval,
+        t_start = interval_starts,
+        t_end = interval_ends,
+        mean_prior = mean_Re_prior
+      )
+    )
+  ))
+
+  additional_offset <- interval_starts[1] - 1
+
+  replicate_estimates_on_interval <- function(estimates, interval_starts, interval_ends) {
+    replicated_estimates <- unlist(lapply(
+      seq_along(interval_starts),
+      function(x) {
+        rep(estimates[x], interval_ends[x] - interval_starts[x] + 1)
+      }
+    ))
+    return(replicated_estimates)
+  }
+
+  Re_estimate <- replicate_estimates_on_interval(R_instantaneous$R$`Mean(R)`, interval_starts, interval_ends)
+  Re_estimate <- .get_module_output(Re_estimate, input_offset, additional_offset)
+
+  if (output_HPD) {
+    Re_highHPD <- replicate_estimates_on_interval(R_instantaneous$R$`Quantile.0.975(R)`, interval_starts, interval_ends)
+    Re_highHPD <- .get_module_output(Re_highHPD, input_offset, additional_offset)
+
+    Re_lowHPD <- replicate_estimates_on_interval(R_instantaneous$R$`Quantile.0.025(R)`, interval_starts, interval_ends)
+    Re_lowHPD <- .get_module_output(Re_lowHPD, input_offset, additional_offset)
+
+    return(list(
+      Re_estimate = Re_estimate,
+      Re_highHPD = Re_highHPD,
+      Re_lowHPD = Re_lowHPD
+    ))
+  } else {
+    return(Re_estimate)
+  }
+}
+","R"
+"Nowcasting","covid-19-Re/estimateR","R/simulate.R",".R","6666","151","#' Simulate a series of infections
+#'
+#' Perform a simulation of infections through time, based on a reproductive number course,
+#' a series of imported cases and a serial interval distribution.
+#'
+#' @example man/examples/simulate_infections.R
+#'
+#' @param imported_infections Non-negative integer vector.
+#' Must be of length at least one. Does not need to be the same length as \code{Rt}.
+#' If \code{imported_infections} is of greater length than \code{Rt} then \code{Rt}
+#' is padded with zeroes to reach the same length.
+#' @inheritParams simulate
+#'
+#' @return Integer vector. Simulated infections through time.
+#' @export
+simulate_infections <- function(Rt, imported_infections = 1, mean_SI = 4.8, sd_SI = 2.3){
+
+  .are_valid_argument_values(list(
+    list(Rt, ""numeric_vector""),
+    list(imported_infections, ""non_negative_integer_vector""),
+    list(mean_SI, ""positive_number""),
+    list(sd_SI, ""positive_number"")
+  ))
+
+  length_infections <- max(length(Rt), length(imported_infections))
+
+  # Bring vectors to the same length (length_infections) for future additions
+  Rt <- c(Rt, rep(0, times = length_infections - length(Rt)))
+  imported_infections <- c(imported_infections, rep(0, times = length_infections - length(imported_infections)))
+
+  infectiousness_profile <- .get_infectiousness_profile(mean_SI, sd_SI)
+
+  infections <- imported_infections # Initialize with imports
+
+  # seq_len()[-1] is empty for a length-one series, which avoids indexing past the end
+  for (i in seq_len(length_infections)[-1]) {
+    infections[i] <- infections[i] + .draw_It(Rt = Rt,
+                                              incidence = infections,
+                                              day = i,
+                                              infectiousness_profile = infectiousness_profile)
+  }
+
+  return(infections)
+}
+
+#' Simulate a series of delayed observations from a series of infections.
+#'
+#' @example man/examples/simulate_delayed_observations.R
+#'
+#' @inheritParams simulate
+#' @inheritParams delay_high
+#'
+#' @return Integer vector. Simulated delayed observations.
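+#' For example (before stochastic rounding and optional noise): with
+#' \code{infections = c(10, 10)} and a delay pmf of \code{c(0.6, 0.4)},
+#' the expected observations are 10 * 0.6 = 6 on the first time step and
+#' 10 * 0.4 + 10 * 0.6 = 10 on the second one.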
+#' @export
+simulate_delayed_observations <- function(infections, delay, noise = list(type = ""noiseless"")){
+
+  .are_valid_argument_values(list(
+    list(infections, ""non_negative_integer_vector""),
+    list(delay, ""delay_single_or_list"", .get_input_length(infections)),
+    list(noise, ""noise"")
+  ))
+
+  total_delay_distribution <- convolve_delays(delays = delay)
+
+  observations <- sapply(1:length(infections), function(x){.compute_Ot(infections, total_delay_distribution, day = x)})
+  # Add (optional) noise
+  observations <- .add_noise(observations,
+                             noise = noise)
+
+  return(observations)
+}
+
+#' Simulate a series of observations from a course of infections, combining some partially-delayed and some fully-delayed observations.
+#'
+#' The infections that are observed as partially-delayed observations cannot be observed a second time as fully-delayed observations,
+#' meaning that they do not show up a second time in the ""fully-delayed"" column of the result.
+#' However, a partially-delayed observation can only be ""registered"" (included in the ""partially-delayed"" column) if
+#' it has been virtually observed as a fully-delayed observation first.
+#'
+#' @inheritParams simulate
+#' @inheritParams delay_high
+#' @param prob_partial_observation Numeric value between 0 and 1.
+#' Probability that an infection is observed as a partially-delayed observation
+#' instead of as a fully-delayed observation.
+#'
+#' @example man/examples/simulate_combined_observations.R
+#'
+#' @return A dataframe containing two columns:
+#' a column ""partially_delayed"" containing partially-delayed observations
+#' and a column ""fully_delayed"" containing fully-delayed observations.
+#' @export
+simulate_combined_observations <- function(infections,
+                                           delay_until_partial,
+                                           delay_until_final_report,
+                                           prob_partial_observation,
+                                           noise = list(type = 'noiseless')){
+
+  .are_valid_argument_values(list(
+    list(infections, ""non_negative_integer_vector""),
+    list(delay_until_partial, ""delay_single_or_list"", .get_input_length(infections)),
+    list(delay_until_final_report, ""delay_single_or_list"", .get_input_length(infections)),
+    list(prob_partial_observation, ""numeric_between_zero_one""),
+    list(noise, ""noise"")
+  ))
+
+  delay_until_partial <- .get_delay_distribution(delay_until_partial)
+  all_partial_observations <- sapply(1:length(infections),
+                                     function(x){.compute_Ot(infections, delay_until_partial, day = x)})
+  sampled_partial_observations <- sapply(all_partial_observations,
+                                         function(x){stats::rbinom(n = 1, size = x, prob = prob_partial_observation)})
+
+  unsampled_partial_observations <- all_partial_observations - sampled_partial_observations
+  unsampled_partial_observations[unsampled_partial_observations < 0] <- 0
+  delay_until_final_report <- .get_delay_distribution(delay_until_final_report)
+  final_observations <- sapply(1:length(unsampled_partial_observations),
+                               function(x){.compute_Ot(unsampled_partial_observations, delay_until_final_report, day = x)})
+
+  # Add (optional) noise
+  final_observations <- .add_noise(final_observations,
+                                   noise = noise)
+
+  .discard_unsampled_full_observations <- function(partial_observations, delay_until_final_report){
+    delay_distribution <- .get_delay_distribution(delay_until_final_report)
+
+    sampled_partial_observations <- partial_observations
+
+    cdf <- cumsum(delay_distribution)
+    if (cdf[length(cdf)] < 1) {
+      cdf <- c(cdf, 1)
+    }
+
+    max_negative_index <- length(cdf) - 1
+
+    for (idx in 0:max_negative_index) {
+      
sampled_partial_observations[length(partial_observations) - idx] <- stats::rbinom(n = 1, + size = partial_observations[length(partial_observations) - idx], + prob = cdf[idx + 1]) + } + + return(sampled_partial_observations) + } + + sampled_partial_observations <- .discard_unsampled_full_observations(sampled_partial_observations, + delay_until_final_report) + + # Add (optional) noise + sampled_partial_observations <- .add_noise(sampled_partial_observations, + noise = noise) + + return(data.frame(partially_delayed=sampled_partial_observations, fully_delayed = final_observations)) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/bootstrap.R",".R","5669","160","#' Obtain a bootstrap replicate from incidence data +#' +#' Apply a bootstrapping procedure on incidence data. +#' Estimating Re over many bootstrapped replicates allows one to estimate +#' the uncertainty over the estimated Re value due to observation noise. +#' For now, only a non-parametric block bootstrapping function is implemented. +#' +#' @example man/examples/get_bootstrap_replicate.R +#' @inheritParams module_methods +#' @inherit module_structure +#' @inheritDotParams .block_bootstrap -incidence_input +#' +#' @export +get_bootstrap_replicate <- function(incidence_data, + bootstrapping_method = ""non-parametric block boostrap"", + simplify_output = TRUE, + ...) { + .are_valid_argument_values(list( + list(incidence_data, ""module_input""), + list(bootstrapping_method, ""bootstrapping_method""), + list(simplify_output, ""boolean"") + )) + + dots_args <- .get_dots_as_list(...) + input <- .get_module_input(incidence_data) + + if (bootstrapping_method == ""non-parametric block boostrap"") { + bootstrapped_incidence <- do.call( + "".block_bootstrap"", + c( + list(incidence_input = input), + .get_shared_args( + list( + .block_bootstrap, + .block_bootstrap_overlap_func, + .smooth_LOESS + ), + dots_args + ) + ) + ) + } else if (bootstrapping_method == ""none"") { + bootstrapped_incidence <- input + } else { + bootstrapped_incidence <- .make_empty_module_output() + } + + if (simplify_output) { + bootstrapped_incidence <- .simplify_output(bootstrapped_incidence) + } + + return(bootstrapped_incidence) +} + +#' Apply block-bootstrapping procedure to module input +#' +#' \code{.block_bootstrap} returns a block-bootstrapped replicate +#' of the incidence. Incidence should be a vector of non-negative values +#' +#' This function works by resampling blocks of differences (on the log-scale) +#' between the original data and a smoothed version of the original data. +#' +#' @param round_incidence boolean. If \code{TRUE}, the bootstrapped incidence is rounded to the nearest integer. +#' @inheritParams module_methods +#' @inheritParams inner_module +#' @inheritDotParams .block_bootstrap_overlap_func -incidence_vector +#' @inheritDotParams smooth_incidence -simplify_output -incidence_data +#' +#' @return a module output object. bootstrapped incidence. +.block_bootstrap <- function(incidence_input, round_incidence = TRUE, smoothing_method = ""LOESS"", ...) { + .are_valid_argument_values(list( + list(incidence_input, ""module_input""), + list(smoothing_method, ""smoothing_method""), + list(round_incidence, ""boolean"") + )) + + dots_args <- .get_dots_as_list(...) 
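+
+  # The steps below implement the procedure described above: work on
+  # log(incidence + 1), smooth it, compute the residuals (original minus
+  # smoothed), resample blocks of residuals, and re-apply them to the
+  # smoothed curve.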
+ incidence_vector <- .get_values(incidence_input) + log_original <- log(incidence_vector + 1) + + smoothed_log <- do.call( + ""smooth_incidence"", + c( + list( + incidence_data = log_original, + smoothing_method = smoothing_method + ), + .get_shared_args(.smooth_LOESS, dots_args) + ) + ) + + diff_smoothed_original <- log_original - smoothed_log + + bootstrapped_diff <- do.call( + "".block_bootstrap_overlap_func"", + c( + list(incidence_vector = diff_smoothed_original), + .get_shared_args(.block_bootstrap_overlap_func, dots_args) + ) + ) + + bootstrapped_incidence <- exp(bootstrapped_diff + smoothed_log) - 1 + bootstrapped_incidence[bootstrapped_incidence < 0] <- 0 + + if (round_incidence) { + bootstrapped_incidence <- round(bootstrapped_incidence) + } + + return(.get_module_output(bootstrapped_incidence, .get_offset(incidence_input))) +} + +#' Helper function for block-bootstrapping +#' +#' Builds a bootstrapped vector of errors. +#' +#' @param incidence_vector numeric vector. Original incidence to bootstrap over. +#' @param block_size integer. Size of a bootstrapping block. +#' @param keep_weekdays_aligned boolean. +#' Set to \code{FALSE} if not daily incidence, or if no weekly noise pattern that would require +#' to apply errors to the same day of the week as they were in the original data. +#' +#' @return numeric vector. Bootstrapped differences. +.block_bootstrap_overlap_func <- function(incidence_vector, block_size = 10, keep_weekdays_aligned = TRUE) { + .are_valid_argument_values(list( + list(incidence_vector, ""numeric_vector""), + list(block_size, ""positive_integer""), + list(keep_weekdays_aligned, ""boolean"") + )) + + bootstrapped_incidence <- c() + + if (keep_weekdays_aligned) { + # get the weekdays for each position of incidence_vector + weekdays_index <- (1:length(incidence_vector)) %% 7 + weekdays_index[which(weekdays_index == 0)] <- 7 + last_day_index <- 7 + } + + while (length(bootstrapped_incidence) < length(incidence_vector)) { + start_index <- sample(1:(length(incidence_vector) - block_size + 1), 1) + sampled_index <- start_index:(start_index + block_size - 1) + + if (keep_weekdays_aligned) { + sampled_weekdays <- weekdays_index[sampled_index] + # make sure the day related to the first sample is after the previous bootstrapped_incidence + first_day_index <- which(sampled_weekdays == last_day_index)[1] + 1 + bootstrapped_incidence_index <- sampled_index[first_day_index:block_size] + last_day_index <- utils::tail(weekdays_index[bootstrapped_incidence_index], 1) + } else { + bootstrapped_incidence_index <- sampled_index + } + + bootstrapped_incidence <- c(bootstrapped_incidence, incidence_vector[bootstrapped_incidence_index]) + } + + # take the same length as previous incidence_vector + bootstrapped_incidence <- bootstrapped_incidence[1:length(incidence_vector)] + return(bootstrapped_incidence) +} +","R" +"Nowcasting","covid-19-Re/estimateR","R/utils-input.R",".R","4521","148","#' Transform input data into a module input object +#' +#' The input can be a list containing a \code{values} element +#' and an \code{index_offset} element, potentially among others. +#' It can also be a vector containing numeric values. +#' +#' @param data A module input object or numeric vector. +#' +#' @return module input object. +#' List with a \code{values} and an \code{index_offset} element. 
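+#' For example, \code{list(values = c(1, 2, 3), index_offset = -2)} describes
+#' values whose first element sits two time steps before the reference time step.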
+#'
+.get_module_input <- function(data) {
+  .are_valid_argument_values(list(list(data, ""module_input"")))
+
+  if (is.list(data)) {
+    values <- as.double(data$values)
+    index_offset <- data$index_offset
+  } else {
+    values <- as.double(data)
+    index_offset <- 0
+  }
+  return(list(""values"" = values, ""index_offset"" = index_offset))
+}
+
+#' Get values from module object
+#'
+#' This function must be adapted if the module_input_object implementation changes.
+#'
+#' @param module_object module object.
+#'
+#' @return vector containing \code{values} only
+.get_values <- function(module_object) {
+  .are_valid_argument_values(list(list(module_object, ""module_input"")))
+  if (is.list(module_object)) {
+    return(module_object$values)
+  } else {
+    return(module_object)
+  }
+}
+
+
+#' Get offset from module object
+#'
+#' This function must be adapted if the module_input_object implementation changes.
+#'
+#' @param module_object module object.
+#'
+#' @return numeric scalar. \code{index_offset} of the \code{module_object}.
+.get_offset <- function(module_object) {
+  .are_valid_argument_values(list(list(module_object, ""module_input"")))
+  if (is.list(module_object)) {
+    return(module_object$index_offset)
+  } else {
+    return(0)
+  }
+}
+
+#' Get length of values vector in a module input object.
+#'
+#' @inheritParams inner_module
+#'
+#' @return integer. length of values vector.
+.get_input_length <- function(input) {
+  .are_valid_argument_values(list(list(input, ""module_input"")))
+  return(length(.get_values(input)))
+}
+
+#' Add overlapping values from two module objects
+#'
+#' Values in the output object are the element-wise sums of the overlapping values of the two input objects.
+#' The \code{offset} of the output is the maximum offset between the two input objects.
+#'
+#' @inherit inner_module
+#'
+#' @export
+inner_addition <- function(input_a, input_b) {
+  .are_valid_argument_values(list(
+    list(input_a, ""module_input""),
+    list(input_b, ""module_input"")
+  ))
+
+  length_a <- .get_input_length(input_a)
+  length_b <- .get_input_length(input_b)
+
+  offset_a <- .get_offset(input_a)
+  offset_b <- .get_offset(input_b)
+
+  inner_offset <- max(offset_a, offset_b)
+  length_addition <- min(length_a - (inner_offset - offset_a), length_b - (inner_offset - offset_b))
+
+  inner_a <- .get_values(input_a)[seq(from = inner_offset - offset_a + 1, by = 1, length.out = length_addition)]
+  inner_b <- .get_values(input_b)[seq(from = inner_offset - offset_b + 1, by = 1, length.out = length_addition)]
+
+  return(.get_module_input(list(values = inner_a + inner_b, index_offset = inner_offset)))
+}
+
+#' Add values from two module objects after left-padding
+#'
+#' The \code{offset} of the output object is the minimum offset between \code{input_a} and \code{input_b}.
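+#' For example, adding \code{values = c(1, 2)} at offset 0 to
+#' \code{values = c(10, 20, 30)} at offset -1 first left-pads the former with
+#' a zero and then returns \code{values = c(10, 21, 32)} with \code{index_offset = -1}.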
+#' @inherit inner_module +#' +#' @export +left_addition <- function(input_a, input_b) { + .are_valid_argument_values(list( + list(input_a, ""module_input""), + list(input_b, ""module_input"") + )) + + offset_a <- .get_offset(input_a) + offset_b <- .get_offset(input_b) + + min_offset <- min(offset_a, offset_b) + padded_input_a <- leftpad_input(input_a, min_offset, padding_value = 0) + padded_input_b <- leftpad_input(input_b, min_offset, padding_value = 0) + + length_a <- .get_input_length(padded_input_a) + length_b <- .get_input_length(padded_input_b) + + length_addition <- min(length_a, length_b) + + values_a <- .get_values(padded_input_a)[1:length_addition] + values_b <- .get_values(padded_input_b)[1:length_addition] + + return(.get_module_input(list(values = values_a + values_b, index_offset = min_offset))) +} + +#' Pad values on the left side of input +#' +#' @param new_offset Offset of output. +#' @param padding_value Value to left-pad input with. +#' @inherit inner_module +#' +#' @export +leftpad_input <- function(input, new_offset, padding_value = 0) { + .are_valid_argument_values(list( + list(input, ""module_input""), + list(new_offset, ""number""), + list(padding_value, ""number"") + )) + + if (new_offset >= .get_offset(input)) { + return(input) + } else { + padded_values <- c(rep(padding_value, length.out = .get_offset(input) - new_offset), .get_values(input)) + return(list(values = padded_values, index_offset = new_offset)) + } +} +","R" +"Nowcasting","covid-19-Re/estimateR","data-raw/HK_incidence_data.R",".R","3117","82","## code to prepare `HK_incidence_data` dataset goes here + +max_date <- as.Date(""2020-08-01"") + +HK_linelist_data_url <- ""https://api.data.gov.hk/v2/filter?q=%7B%22resource%22%3A%22http%3A%2F%2Fwww.chp.gov.hk%2Ffiles%2Fmisc%2Fenhanced_sur_covid_19_eng.csv%22%2C%22section%22%3A1%2C%22format%22%3A%22csv%22%7D"" +HK_linelist_data <- try(readr::read_csv(HK_linelist_data_url, + col_types = list( + `Case no.` = readr::col_double(), + `Report date` = readr::col_character(), + `Date of onset` = readr::col_character(), + Gender = readr::col_character(), + Age = readr::col_double(), + `Name of hospital admitted` = readr::col_character(), + `Hospitalised/Discharged/Deceased` = readr::col_character(), + `HK/Non-HK resident` = readr::col_character(), + `Case classification*` = readr::col_character(), + `Confirmed/probable` = readr::col_character() + ) +)) + +if (""try-error"" %in% class(HK_linelist_data)) { + stop(stringr::str_c(""Couldn't read Hong Kong linelist data at "", HK_linelist_data_url)) +} + +HK_linelist_data <- HK_linelist_data %>% + dplyr::filter(.data$`Confirmed/probable` == ""Confirmed"") %>% + # only keep confirmed cases + dplyr::transmute( + report_date = as.Date(.data$`Report date`, format = ""%d/%m/%Y""), + onset_date = as.Date(.data$`Date of onset`, format = ""%d/%m/%Y"") + ) %>% + dplyr::filter( + !is.na(.data$report_date), + .data$report_date < max_date + ) + +# Gather the incidence based on all confirmed cases +case_incidence <- HK_linelist_data %>% + dplyr::transmute(date = .data$report_date) %>% + dplyr::group_by(.data$date) %>% + dplyr::tally(name = ""case_incidence"") + +# Gather incidence based on dates of onset of symptoms +# for cases for which this is known +onset_incidence <- HK_linelist_data %>% + dplyr::transmute(date = .data$onset_date) %>% + dplyr::filter(!is.na(.data$date)) %>% + dplyr::group_by(.data$date) %>% + dplyr::tally(name = ""onset_incidence"") + +# Gather incidence based on dates of case confirmation +# for cases with no 
known date of onset of symptoms +report_incidence <- HK_linelist_data %>% + # Only keep report_date when we do not have the onset_date + dplyr::mutate(report_date = dplyr::if_else(is.na(.data$onset_date), + .data$report_date, + as.Date(NA) + )) %>% + dplyr::transmute(date = .data$report_date) %>% + dplyr::filter(!is.na(.data$date)) %>% + dplyr::group_by(.data$date) %>% + dplyr::tally(name = ""report_incidence"") + +HK_incidence_data <- dplyr::full_join(onset_incidence, report_incidence, by = ""date"") %>% + dplyr::full_join(y = case_incidence, by = ""date"") %>% + tidyr::replace_na(list(onset_incidence = 0, report_incidence = 0, case_incidence = 0)) %>% + tidyr::complete( + date = seq.Date(min(.data$date), # add zeroes for dates with no reported case + max(.data$date), + by = ""days"" + ), + fill = list( + onset_incidence = 0, + report_incidence = 0, + case_incidence = 0 + ) + ) %>% + dplyr::select(.data$date, .data$case_incidence, .data$onset_incidence, .data$report_incidence) %>% + dplyr::arrange(.data$date) + +usethis::use_data(HK_incidence_data, overwrite = TRUE, compress = ""bzip2"", version = 2) +","R" +"Nowcasting","covid-19-Re/estimateR","data-raw/HK_delay_data.R",".R","2028","48","## code to prepare `HK_delay_data` dataset goes here + +max_delay_confirm <- 30 +max_date <- as.Date(""2020-08-01"") + +HK_linelist_data_url <- ""https://api.data.gov.hk/v2/filter?q=%7B%22resource%22%3A%22http%3A%2F%2Fwww.chp.gov.hk%2Ffiles%2Fmisc%2Fenhanced_sur_covid_19_eng.csv%22%2C%22section%22%3A1%2C%22format%22%3A%22csv%22%7D"" +HK_linelist_data <- try(readr::read_csv(HK_linelist_data_url, + col_types = list( + `Case no.` = readr::col_double(), + `Report date` = readr::col_character(), + `Date of onset` = readr::col_character(), + Gender = readr::col_character(), + Age = readr::col_double(), + `Name of hospital admitted` = readr::col_character(), + `Hospitalised/Discharged/Deceased` = readr::col_character(), + `HK/Non-HK resident` = readr::col_character(), + `Case classification*` = readr::col_character(), + `Confirmed/probable` = readr::col_character() + ) +)) + +if (""try-error"" %in% class(HK_linelist_data)) { + stop(stringr::str_c(""Couldn't read Hong Kong linelist data at "", HK_linelist_data_url)) +} + +HK_delay_data <- HK_linelist_data %>% + dplyr::filter(.data$`Confirmed/probable` == ""Confirmed"") %>% + # only keep confirmed cases + dplyr::transmute( + event_date = as.Date(.data$`Date of onset`, format = ""%d/%m/%Y""), # rename/reformat columns + report_date = as.Date(.data$`Report date`, format = ""%d/%m/%Y"") + ) %>% + dplyr::filter(!is.na(.data$event_date), !is.na(.data$report_date)) %>% + dplyr::filter(.data$event_date < max_date) %>% + dplyr::mutate(report_delay = as.integer(.data$report_date - .data$event_date)) %>% + # extract reporting delays + dplyr::mutate(report_delay = if_else(!between(.data$report_delay, 0, max_delay_confirm), # curate negative or too large reporting delays + as.integer(NA), + .data$report_delay + )) %>% + dplyr::filter(!is.na(.data$report_delay)) %>% + # remove NA values + dplyr::select(-.data$report_date) %>% + # rearrange dataset + dplyr::arrange(.data$event_date) + +usethis::use_data(HK_delay_data, overwrite = TRUE, compress = ""xz"", version = 2) +","R" +"Nowcasting","covid-19-Re/estimateR","data-raw/EST_incidence_data.R",".R","482","16"," +# Estonian case data from Terviseamet +url <- ""https://opendata.digilugu.ee/opendata_covid19_tests_total.csv"" +EST_data <- try(readr::read_csv(url)) +if (""try-error"" %in% class(EST_data)) { + 
stop(stringr::str_c(""Couldn't read EST case data at "", url)) +} + +EST_incidence_data <- EST_data %>% + dplyr::transmute( + date = lubridate::as_date(StatisticsDate), + case_incidence = DailyCases + ) + +usethis::use_data(EST_incidence_data, overwrite = TRUE, compress = ""bzip2"", version = 2) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat.R",".R","62","5","library(testthat) +library(estimateR) + +test_check(""estimateR"") +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-utils-input.R",".R","689","17","test_that("".get_module_input() deals with well-formatted input"", { + toy_incidence <- c(1, 2, 1, 0) + data_1 <- list(values = toy_incidence, index_offset = -4) + + expect_identical(.get_module_input(toy_incidence), list(values = toy_incidence, index_offset = 0)) + expect_identical(.get_module_input(data_1), data_1) +}) + +test_that("".get_module_input() checks input format"", { + toy_incidence <- c(1, 23, 1, 50) + data_1 <- list(values = toy_incidence, index_offset = -2, baz = 6) + invalid_input_1 <- list(a = toy_incidence, b = -2, c = 6) + + expect_identical(.get_module_input(data_1), list(values = toy_incidence, index_offset = -2)) + expect_error(.get_module_input(invalid_input_1)) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-pipe.R",".R","37372","1023","test_that(""estimate_Re_from_noisy_delayed_incidence yields consistent results on a toy example"", { + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + delay_distribution <- c( + 0, 0.015, 0.09, 0.168, + 0.195, 0.176, 0.135, 0.091, 0.057, 0.034, + 0.019, 0.01, 0.005, 0.003, + 0.001, 0.001 + ) + + estimates <- estimate_Re_from_noisy_delayed_incidence( + incidence_data = toy_incidence_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + delay = delay_distribution, + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + output_Re_only = FALSE, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"" + ) + + reference_R_values <- c( + NA, NA, NA, NA, NA, NA, 3.15, 2.86, 2.67, + 2.53, 2.41, 2.29, 2.18, 2.08, 1.98, 1.88, + 1.78, 1.69, 1.61, 1.52, 1.44, 1.37, 1.3, 1.23, + 1.16, 1.09, 1.02, 0.96, 0.9, 0.86, 0.82, 0.79, + 0.76, 0.74, 0.72,0.69,0.66,NA,NA,NA,NA,NA) + + reference_dates <- seq.Date(from = as.Date(""2020-01-30""), to = as.Date(""2020-03-11""), by = ""day"") + + expect_equal(estimates$Re_estimate, reference_R_values, tolerance = 5E-2) + expect_equal(estimates$date, reference_dates) +}) + +test_that(""estimate_Re_from_noisy_delayed_incidence yields consistent results with import data"", { + toy_incidence_data <- c( + 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1, 2, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + toy_import_data <- c( + 1, 1, 1, 1, 1, 1, 2, 2, 2, 0, 0, + 0, 0, 0, 0, 0, 9, 0, 1, 0, 0, 0, 0 + ) + + delay_distribution <- c( + 0, 0.015, 0.09, 0.168, + 0.195, 0.176, 0.135, 0.091, 0.057, 0.034, + 0.019, 0.01, 0.005, 0.003, + 0.001, 0.001 + ) + + estimates <- estimate_Re_from_noisy_delayed_incidence( + incidence_data = toy_incidence_data, + import_incidence_data = toy_import_data, + smoothing_method = ""LOESS"", + 
deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + delay = delay_distribution, + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + output_Re_only = FALSE, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"" + ) + + reference_R_values <- c( + NA, NA, NA, NA, NA, NA, NA, NA, NA, 3.52, 3.59, 3.52, + 3.37, 3.21, 3.05, 2.89, 2.74, 2.61, 2.48, 2.36, + 2.24, 2.13, 2.03, 1.92, 1.83, 1.73, 1.64, 1.56, + 1.47, 1.39, 1.32, 1.25, 1.17, 1.1, 1.02, 0.95, + 0.89, 0.84, 0.81, 0.77, 0.74, 0.72, 0.69, 0.65, + 0.62, NA, NA, NA, NA, NA + ) + + reference_dates <- seq.Date(from = as.Date(""2020-01-30""), to = as.Date(""2020-03-19""), by = ""day"") + + expect_equal(estimates$Re_estimate, reference_R_values, tolerance = 5E-2) + expect_equal(estimates$date, reference_dates) +}) + +test_that(""estimate_Re_from_noisy_delayed_incidence passes '...' arguments consistently"", { + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + delay_distribution <- c( + 0, 0.015, 0.09, 0.168, + 0.195, 0.176, 0.135, 0.091, 0.057, 0.034, + 0.019, 0.01, 0.005, 0.003, + 0.001, 0.001 + ) + + estimates <- estimate_Re_from_noisy_delayed_incidence( + incidence_data = toy_incidence_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + delay = delay_distribution, + estimation_window = 5, + mean_serial_interval = 8, + std_serial_interval = 3, + minimum_cumul_incidence = 0, + block_size = 3, + degree = 1, + mean_Re_prior = 2.5, + output_Re_only = FALSE, + index_col = ""date_index"" + ) + + reference_R_values <- c( + NA, NA, NA, NA, NA, NA, NA, + NA, NA, NA, NA, 4.9, 4.42, + 4.03, 3.7, 3.4, 3.13, 2.89, + 2.66, 2.44, 2.25, 2.07, 1.9, 1.74, + 1.59, 1.45, 1.32, 1.2, 1.09, + 0.99, 0.91, 0.84, 0.77, 0.72, + 0.67, 0.63, 0.59, NA, NA, NA, NA, NA + ) + reference_indices <- -5:36 + + expect_equal(estimates$Re_estimate, reference_R_values, tolerance = 1E-2) + expect_equal(estimates$date_index, reference_indices) +}) + +test_that(""get_block_bootstrapped_estimate yields consistent results on a toy example"", { + skip_on_cran() + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 3 + scale_onset_to_report <- 1.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""bagged mean - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"" + ) + + 
reference_dates <- seq.Date( + from = as.Date(""2020-02-04""), + to = as.Date(""2020-03-05""), + by = ""day"" + ) + + reference_R_mean_values <- c( + 3.33, 3.04, 2.84, 2.67, 2.52, 2.38, 2.25, + 2.13, 2.01, 1.9, 1.8, 1.7, 1.61, 1.52, 1.44, + 1.36, 1.29, 1.22, 1.15, 1.09, 1.03, 0.97, + 0.92, 0.89, 0.86, 0.84, 0.82, 0.8, 0.78, 0.77, 0.75 + ) + + reference_CI_down_values <- c( + 3.13, 2.84, 2.65, 2.51, 2.39, 2.27, + 2.14, 2.02, 1.91, 1.81, 1.71, 1.63, + 1.54, 1.46, 1.39, 1.31, 1.24, 1.17, + 1.11, 1.05, 0.99, 0.94, 0.89, 0.85, + 0.82, 0.79, 0.77, 0.75, 0.72, 0.7, 0.67 + ) + + reference_CI_up_values <- c( + 3.54, 3.24, 3.02, 2.83, 2.66, 2.5, 2.36, 2.23, + 2.12, 2, 1.88, 1.78, 1.68, 1.58, 1.49, 1.41, + 1.34, 1.26, 1.19, 1.12, 1.06, 1, 0.96, 0.92, + 0.9, 0.88, 0.87, 0.86, 0.84, 0.83, 0.82 + ) + + + expect_equal(estimates$date, reference_dates) + expect_equal(estimates$Re_estimate, reference_R_mean_values, tolerance = 1E-1) + expect_equal(estimates$CI_down_Re_estimate, reference_CI_down_values, tolerance = 1E-1) + expect_equal(estimates$CI_up_Re_estimate, reference_CI_up_values, tolerance = 1E-1) + + estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 3, + minimum_cumul_incidence = 0, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1 + ) + + reference_indices <- 0:30 + + reference_R_original_values <- c( + 3.2, 2.91, 2.72, 2.57, 2.44, 2.32, + 2.2, 2.09, 1.98, 1.88, 1.78, 1.69, + 1.6, 1.52, 1.44, 1.36, 1.29, 1.22, + 1.15, 1.08, 1.01, 0.95, 0.89, 0.84, + 0.8, 0.77, 0.75, 0.72, 0.7, 0.66, 0.63 + ) + + reference_CI_down_values <- c( + 3.04, 2.75, 2.58, 2.45, 2.34, 2.22, + 2.11, 1.99, 1.89, 1.79, 1.7, 1.62, + 1.55, 1.47, 1.39, 1.32, 1.25, 1.18, + 1.11, 1.04, 0.98, 0.91, 0.86, 0.81, + 0.77, 0.73, 0.7, 0.67, 0.64, 0.6, 0.56 + ) + + reference_CI_up_values <- c( + 3.36, 3.07, 2.86, 2.69, 2.55, 2.42, 2.3, + 2.19, 2.08, 1.97, 1.86, 1.76, 1.66, 1.57, + 1.48, 1.4, 1.33, 1.26, 1.18, 1.11, 1.04, + 0.98, 0.92, 0.88, 0.84, 0.82, 0.8, 0.78, + 0.75, 0.73, 0.7 + ) + + expect_equal(estimates$idx, reference_indices) + expect_equal(estimates$Re_estimate, reference_R_original_values, tolerance = 1E-1) + expect_equal(estimates$CI_down_Re_estimate, reference_CI_down_values, tolerance = 1E-1) + expect_equal(estimates$CI_up_Re_estimate, reference_CI_up_values, tolerance = 1E-1) +}) + +test_that(""get_block_bootstrapped_estimate yields consistent with import data"", { + skip_on_cran() + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + toy_import_data <- c( + 1, 1, 1, 1, 1, 1, 2, 2, 2, 0, 0, + 0, 0, 0, 0, 0, 9, 0, 1, 0, 0, 0, 0 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 3 + scale_onset_to_report <- 1.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + import_incidence_data = 
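+    # imported cases contribute to the infection pressure in the EpiEstim step
+    # but are not counted as local transmission, hence the lower early Re
+    # estimates here than in the import-free test above: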
toy_import_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""bagged mean - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"" + ) + + reference_dates <- seq.Date( + from = as.Date(""2020-02-04""), + to = as.Date(""2020-03-05""), + by = ""day"" + ) + + reference_R_mean_values <- c( + 2.69, 2.48, 2.37, 2.29, 2.22, 2.14, 2.06, + 1.98, 1.9, 1.82, 1.73, 1.65, 1.57, 1.49, + 1.42, 1.35, 1.28, 1.21, 1.14, 1.08, 1.02, + 0.97, 0.92, 0.89, 0.86, 0.84, 0.82, 0.8, + 0.79, 0.77, 0.75 + ) + + reference_CI_down_values <- c( + 2.53, 2.34, 2.24, 2.18, 2.12, 2.05, 1.97, + 1.89, 1.8, 1.72, 1.65, 1.58, 1.5, 1.43, + 1.36, 1.29, 1.23, 1.17, 1.11, 1.05, 0.99, 0.94, 0.89, + 0.85, 0.82, 0.79, 0.77, 0.74, 0.72, 0.7, 0.67 + ) + + reference_CI_up_values <- c( + 2.85, 2.63, 2.5, 2.4, 2.32, 2.23, 2.15, + 2.08, 2, 1.91, 1.82, 1.72, 1.64, 1.55, + 1.47, 1.4, 1.32, 1.25, 1.18, 1.12, 1.05, + 1, 0.96, 0.92, 0.9, 0.88, 0.87, 0.86, + 0.85, 0.84, 0.83 + ) + + + expect_equal(estimates$date, reference_dates) + expect_equal(estimates$Re_estimate, reference_R_mean_values, tolerance = 1E-1) + expect_equal(estimates$CI_down_Re_estimate, reference_CI_down_values, tolerance = 1E-1) + expect_equal(estimates$CI_up_Re_estimate, reference_CI_up_values, tolerance = 1E-1) +}) + +test_that(""get_block_bootstrapped_estimate passes '...' arguments to inner functions properly"", { + skip_on_cran() + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 3 + scale_onset_to_report <- 1.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""bagged mean - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 5, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 0, + block_size = 8, + degree = 2, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"" + ) + + reference_R_mean_values <- c( + 5.5, 4.6, 3.95, 3.49, 3.15, 2.88, + 2.66, 2.48, 2.31, 2.15, 2.02, 1.89, 1.76, 1.64, + 1.53, 1.42, 1.32, 1.22, 1.13, 1.05, 0.98, 0.92, + 0.86, 0.8, 0.74, 0.67, 0.6, 0.51, 0.42 + ) + + reference_CI_down_values <- c( + 5.01, 4.28, 3.72, + 3.28, 2.95, 2.69, 2.49, 2.34, 2.2, 2.06, + 1.94, 1.83, 1.71, 1.59, 1.49, 1.38, 1.28, + 1.19, 1.1, 1.03, 0.96, 0.9, 0.84, 0.78, + 0.72, 0.66, 0.58, 0.5, 0.4 + ) + + reference_CI_up_values <- c( + 5.99, 4.92, + 4.18, 3.69, 3.36, 3.07, 2.82, + 2.62, 2.43, 2.25, 2.09, 1.95, + 1.81, 1.69, 1.57, 1.46, 1.36, + 1.26, 1.17, 1.08, 1, 0.93, 0.87, + 0.81, 0.75, 0.68, 0.61, 0.53, 
0.44 + ) + + # master + # reference_R_mean_values <- c(6.83,5.37,4.53,4.05,3.71,3.43,3.21,3.02,2.84, + # 2.66,2.5,2.34,2.18,2.04,1.9,1.77,1.64, + # 1.53,1.42,1.32,1.22,1.13,1.05,0.98,0.92, + # 0.86,0.8,0.74,0.67,0.6,0.52,0.42) + # + # reference_CI_down_values <- c(6.29,4.95,4.21,3.8,3.51,3.27, + # 3.07,2.9,2.73,2.57,2.42,2.28,2.12, + # 1.99,1.86,1.73,1.61,1.5,1.39,1.29, + # 1.19,1.11,1.03,0.96,0.9,0.84,0.78, + # 0.73,0.66,0.59,0.5,0.4) + # + # reference_CI_up_values <- c(7.37,5.79,4.85,4.29,3.9, + # 3.59,3.34,3.14,2.94,2.74, + # 2.57,2.4,2.24,2.09,1.95, + # 1.81,1.68,1.56,1.45,1.34, + # 1.25,1.16,1.08,1,0.94,0.87, + # 0.81,0.75,0.69,0.62,0.54,0.45) + + + expect_equal(estimates$Re_estimate, reference_R_mean_values, tolerance = 1E-1) + expect_equal(estimates$CI_down_Re_estimate, reference_CI_down_values, tolerance = 1E-1) + expect_equal(estimates$CI_up_Re_estimate, reference_CI_up_values, tolerance = 1E-1) +}) + +test_that(""get_infections_from_incidence handles partially-delayed data correctly"", { + toy_onset_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 45, 32, 11, 5, 4, 0, 1, 2, 0 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 3 + scale_onset_to_report <- 1.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + result_deconvolution <- get_infections_from_incidence( + incidence_data = toy_onset_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay = delay_incubation, + is_partially_reported_data = TRUE, + delay_until_final_report = delay_onset_to_report, + output_infection_incidence_only = FALSE, + data_points_incl = 21, + degree = 1, + cutoff_observation_probability = 0.1 + ) + + reference_deconvolved_incidence <- c( + 16.417, 19.763, 22.179, 26.041, 32.397, 40.832, + 50.92, 62.293, 75.173, 89.947, 106.529, 124.207, + 142.552, 161.947, 182.9, 203.07, 221.026, 239.061, + 255.931, 268.766, 278.113, 285.134, 289.532, 289.975, + 286.109, 278.965, 269.38, 256.736, 240.254, 221.707, + 202.645, 181.802, 160.793, 141.705, 123.465, 104.689, + 85.982, 68.065, 49.84, 31.857, 15.892, 3.778, + 0, 0, 0, NA, NA, NA + ) + + expect_equal(result_deconvolution$deconvolved_incidence, reference_deconvolved_incidence, tolerance = 1E-1) +}) + +test_that(""estimate_from_combined_observations returns consistent results"", { + toy_onset_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 45, 32, 11, 5, 4, 0, 1, 2, 0 + ) + + toy_case_confirmation_data <- c( + 11, 12, 21, 23, 2, 14, 49, 61, 65, 45, 66, 45, + 40, 8, 61, 38, 1, 3, 45, 66, 12, 52, 27, 3, 54, + 10, 18, 54, 12, 48, 67, 62, 54, 3, 29, 10, 52, + 61, 33, 39, 55, 8, 64, 51, 65, 34 + ) + + shape_incubation <- 1 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 4 + scale_onset_to_report <- 2.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + results_estimation <- estimate_from_combined_observations( + partially_delayed_incidence = toy_onset_data, + 
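+    # Roughly: onset counts are only partially delayed (incubation), while the
+    # confirmation counts below also carry the onset-to-report delay; each
+    # series is deconvolved with its own delay before the two are combined.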
fully_delayed_incidence = toy_case_confirmation_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + partial_observation_requires_full_observation = TRUE, + ref_date = as.Date(""2021-03-24""), + time_step = ""day"", + minimum_cumul_incidence = 0, + output_Re_only = FALSE, + data_points_incl = 21, + degree = 1 + ) + + reference_deconvolved_incidence <- c( + 59.2, 61.7, 64.2, 69.8, 76.9, 84.4, 92.1, 101.5, 112.9, 126.2, + 140.8, 156.9, 175.5, 195.1, 214.5, 235.3, 255.6, 272.3, 287, + 301.6, 314.4, 323.1, 328.8, 332.7, 333.8, 330.9, 324.9, 317.1, + 305.9, 290.8, 273.5, 255.4, 237.4, 219.6, 201.4, 183.3, 165.1, + NA, NA, NA, NA, NA, NA, NA, NA, NA, NA + ) + + reference_R_values <- c( + NA, NA, NA, NA, NA, NA, 2, 1.7, 1.6, 1.6, 1.6, 1.6, 1.6, 1.6, + 1.6, 1.6, 1.6, 1.5, 1.5, 1.4, 1.4, 1.3, 1.3, 1.2, 1.2, 1.1, + 1.1, 1, 1, 0.9, 0.9, 0.9, 0.8, 0.8, 0.7, 0.7, 0.7, + NA, NA, NA, NA, NA, NA, NA, NA, NA, NA + ) + + expect_equal(results_estimation$combined_deconvolved_incidence, reference_deconvolved_incidence, tolerance = 1E-1) + expect_equal(results_estimation$Re_estimate, reference_R_values, tolerance = 1E-1) +}) + +test_that(""get_bootstrapped_estimate_from_combined_observations returns consistent results"", { + skip_on_cran() + toy_onset_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 45, 32, 11, 5, 4, 0, 1, 2, 0 + ) + + toy_case_confirmation_data <- c( + 11, 12, 21, 23, 2, 14, 49, 61, 65, 45, 66, 45, + 40, 8, 61, 38, 1, 3, 45, 66, 12, 52, 27, 3, 54, + 10, 18, 54, 12, 48, 67, 62, 54, 3, 29, 10, 52, + 61, 33, 39, 55, 8, 64, 51, 65, 34 + ) + + shape_incubation <- 1 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 4 + scale_onset_to_report <- 2.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + results_estimation <- get_bootstrapped_estimates_from_combined_observations( + partially_delayed_incidence = toy_onset_data, + fully_delayed_incidence = toy_case_confirmation_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + bootstrapping_method = ""non-parametric block boostrap"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + N_bootstrap_replicates = 100, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + partial_observation_requires_full_observation = TRUE, + ref_date = as.Date(""2021-03-24""), + time_step = ""day"", + output_Re_only = TRUE, + minimum_cumul_incidence = 0, + data_points_incl = 21, + degree = 1 + ) + + + reference_R_values <- c( + 2, 1.7, 1.6, 1.6, 1.6, 1.6, 1.6, 1.6, + 1.6, 1.6, 1.6, 1.5, 1.5, 1.4, 1.4, 1.3, 1.3, 1.2, 1.2, 1.1, + 1.1, 1, 1, 0.9, 0.9, 0.9, 0.8, 0.8, 0.7, 0.7, 0.7 + ) + + reference_CI_up <- c( + 2.17, 1.95, 1.85, 1.81, 1.79, 1.79, 1.79, + 1.78, 1.76, 1.73, 1.68, 1.62, 1.56, 1.49, 1.43, 1.38, + 1.32, 1.26, 1.21, 1.15, 1.11, 1.06, 1.02, 0.98, 0.94, + 0.9, 0.86, 0.83, 0.81, 0.79, 0.77 + ) + + reference_CI_down <- c( + 1.75, 1.54, 1.45, 1.42, 1.43, 1.44, 1.46, + 1.48, 1.49, 1.49, 1.48, 1.46, 1.41, 
1.36, 1.31, 1.25, + 1.21, 1.16, 1.12, 1.07, 1.03, 0.99, 0.94, 0.89, 0.85, + 0.8, 0.76, 0.72, 0.69, 0.65, 0.61 + ) + + expect_equal(results_estimation$Re_estimate, reference_R_values, tolerance = 1E-1) + expect_equal(results_estimation$CI_up_Re_estimate, reference_CI_up, tolerance = 1E-1) + expect_equal(results_estimation$CI_down_Re_estimate, reference_CI_down, tolerance = 1E-1) +}) + +test_that(""get_bootstrapped_estimate_from_combined_observations can deal with empirical delay data"", { + skip_on_cran() + ref_date <- as.Date(""2021-03-24"") + n_days <- 50 + shape_initial_delay <- 6 + scale_initial_delay <- 1.5 + distribution_initial_delay <- list(name = ""gamma"", shape = shape_initial_delay, scale = scale_initial_delay) + seed <- 7543265 + + generated_empirical_delays <- .generate_delay_data( + origin_date = ref_date, + n_time_steps = n_days, + ratio_delay_end_to_start = 1.5, + distribution_initial_delay = distribution_initial_delay, + seed = seed + ) + + toy_onset_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 45, 32, 11, 5, 4, 0, 1, 2, 0 + ) + + toy_case_confirmation_data <- c( + 11, 12, 21, 23, 2, 14, 49, 61, 65, 45, 66, 45, + 40, 8, 61, 38, 1, 3, 45, 66, 12, 52, 27, 3, 54, + 10, 18, 54, 12, 48, 67, 62, 54, 3, 29, 10, 52, + 61, 33, 39, 55, 8, 64, 51, 65, 34 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + results_estimation <- get_bootstrapped_estimates_from_combined_observations( + partially_delayed_incidence = toy_onset_data, + fully_delayed_incidence = toy_case_confirmation_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + bootstrapping_method = ""non-parametric block boostrap"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + N_bootstrap_replicates = 20, # to speed things up + delay_until_partial = delay_incubation, + delay_until_final_report = generated_empirical_delays, + partial_observation_requires_full_observation = TRUE, + ref_date = ref_date, + time_step = ""day"", + output_Re_only = TRUE, + minimum_cumul_incidence = 0, + data_points_incl = 21, + degree = 1, + cutoff_observation_probability = 0.1 + ) + + reference_R_values <- c( + 1.98, 1.76, 1.67, 1.63, + 1.63, 1.65, 1.66, 1.66, + 1.66, 1.63, 1.59, 1.54, + 1.48, 1.42, 1.37, 1.32, + 1.26, 1.21, 1.17, 1.12, + 1.08, 1.04, 1, 0.95, 0.91, + 0.87, 0.83, 0.8, 0.77, 0.75 + ) + + expect_equal(results_estimation$Re_estimate, reference_R_values, tolerance = 1E-1) +}) + +test_that(""get_block_bootstrapped_estimate yields consistent results on summaries of uncertainty"", { + skip_on_cran() + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 3 + scale_onset_to_report <- 1.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = 
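+    # unlike the bagged-mean tests above, this test summarizes uncertainty with
+    # the original estimate (Re from the unperturbed input) plus bootstrap CIs: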
""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"", + output_Re_only = FALSE + ) + + reference_dates <- seq.Date( + from = as.Date(""2020-01-29""), + to = as.Date(""2020-03-11""), + by = ""day"" + ) + + reference_CI_down_observed_incidence_values <- c( + NA, NA, NA, NA, NA, NA, 4.9, 8, 10, 13, 17, 20.9, 31, 41, + 50.9, 63.9, 78.9, 81.2, 108.1, 124.4, 145.6, 174.2, 208.2, 228.6, + 238.3, 232.6, 214.2, 319.7, 326.6, 318.4, 299.1, + 273.5, 237, 190, 120.2, 32.8, 153.6, 139.7, + 124.4, 104.5, 59.4, 19.8, 15.6 + ) + + reference_CI_up_deconvolved_incidence_values <- c( + 15.3, 18.8, 23.3, 28.7, 35.1, 43.4, 53.8, + 66, 80, 96.4, 115, 134.9, 155.8, 178.9, 201.5, + 222.1, 243.8, 263.9, 280, 295.9, 309.3, 317.8, 323.8, + 324.6, 318.7, 311.1, 300.2, 284.6, 270.9, + 259.1, 246.2, 234.5, 223.2, 210.6, 197.5, + 184.5, 172.5, NA, NA, NA, NA, NA, NA + ) + + reference_CI_down_smoothed_incidence_values <- c( + NA, NA, NA, NA, NA, NA, 14, 17.4, 21.6, 26.4, 32.4, 40, 49.2, 59.5, + 71, 83.9, 98.1, 113, 128.5, 145.3, 162.2, 178.5, 195.4, 211, 223.5, + 234.8, 244.3, 250.3, 252.7, 251.4, 246.6, 238.4, + 226.8, 213.2, 198.9, 184, 168, 151.7, 134.8, 116.8, 98, 79, 60.2 + ) + + reference_CI_up_R_mean_values <- c( + NA, NA, NA, NA, NA, NA, 2.98, 2.72, + 2.57, 2.47, 2.4, 2.32, 2.23, 2.14, + 2.04, 1.93, 1.83, 1.74, 1.65, 1.58, + 1.51, 1.44, 1.38, 1.31, 1.24, 1.17, 1.1, 1.04, + 0.98, 0.93, 0.9, 0.87, 0.85, 0.83, 0.82, + 0.8, 0.78, NA, NA, NA, NA, NA, NA + ) + + + expect_equal(estimates$date, reference_dates) + # This test usually fails but that is not a problem (great stochastic variability) + # expect_equal(estimates$CI_down_observed_incidence, reference_CI_down_observed_incidence_values, tolerance = 1E-1) + expect_equal(estimates$CI_up_deconvolved_incidence, reference_CI_up_deconvolved_incidence_values, tolerance = 1E-1) + expect_equal(estimates$CI_down_smoothed_incidence, reference_CI_down_smoothed_incidence_values, tolerance = 1E-1) + expect_equal(estimates$CI_up_Re_estimate, reference_CI_up_R_mean_values, tolerance = 1E-1) +}) + +test_that(""get_bootstrapped_estimate_from_combined_observations yields consistent results on summaries of uncertainty"", { + skip_on_cran() + toy_onset_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 45, 32, 11, 5, 4, 0, 1, 2, 0 + ) + + toy_case_confirmation_data <- c( + 11, 12, 21, 23, 2, 14, 49, 61, 65, 45, 66, 45, + 40, 8, 61, 38, 1, 3, 45, 66, 12, 52, 27, 3, 54, + 10, 18, 54, 12, 48, 67, 62, 54, 3, 29, 10, 52, + 61, 33, 39, 55, 8, 64, 51, 65, 34 + ) + + shape_incubation <- 1 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 4 + scale_onset_to_report <- 2.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + results_estimation <- get_bootstrapped_estimates_from_combined_observations( + partially_delayed_incidence = toy_onset_data, + fully_delayed_incidence = 
toy_case_confirmation_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + bootstrapping_method = ""non-parametric block boostrap"", + uncertainty_summary_method = ""bagged mean - CI from bootstrap estimates"", + N_bootstrap_replicates = 100, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + partial_observation_requires_full_observation = TRUE, + ref_date = as.Date(""2021-03-24""), + time_step = ""day"", + output_Re_only = FALSE, + minimum_cumul_incidence = 0, + data_points_incl = 21, + degree = 1, + cutoff_observation_probability = 0.1 + ) + + reference_partially_delayed_observations_values <- c( + NA, 4.4, 5.4, 8, 11.1, 14.8, 20.3, 28.2, 37.7, + 49.6, 63.4, 80.6, 96.3, 118, 145.3, 168.4, 193.7, + 218.7, 247.1, 261.4, 287.2, 311.2, 326.7, 336.3, + 342.9, 345.8, 327, 322, 320.6, 301.5, 273.3, 239.7, + 210.9, 177.4, 155.6, 138.5, 120.1, 104.1, 89.1, + 76.9, 63.2, 54.8, 48, NA, NA, NA, NA + ) + + reference_CI_up_combined_deconvolved_incidence_values <- c( + 58.1, 61.8, 65.4, 71.5, 79.1, 88, 98.2, 109.8, + 122.6, 137.3, 153.5, 170.8, 190.9, 211.8, 231.9, + 253.9, 276, 294.6, 311.5, 329.1, 345, 356.2, 364.7, + 370.1, 370.2, 365.8, 359.5, 351.2, 338.7, 323.2, + 307.5, 292.1, 277, 262.5, 248.7, 235.6, 222.7, + NA, NA, NA, NA, NA, NA, NA, NA, NA, NA + ) + + + reference_R_mean_values <- c( + NA, NA, NA, NA, NA, NA, 2.11, 1.9, 1.81, 1.76, + 1.74, 1.72, 1.71, 1.69, 1.66, 1.63, 1.59, 1.54, + 1.49, 1.44, 1.39, 1.34, 1.28, 1.23, 1.18, 1.13, + 1.08, 1.03, 0.99, 0.94, 0.9, 0.86, 0.83, 0.8, + 0.77, 0.75, 0.72, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA + ) + + reference_CI_up_R_mean <- c( + NA, NA, NA, NA, NA, NA, 2.3, 2.09, 1.98, + 1.92, 1.88, 1.85, 1.83, 1.8, 1.77, 1.72, + 1.67, 1.62, 1.56, 1.5, 1.45, + 1.4, 1.34, 1.28, 1.23, 1.17, 1.11, 1.07, + 1.03, 0.99, 0.95, 0.91, 0.88, 0.86, 0.84, + 0.82, 0.81, NA, NA, NA, NA, NA, NA, NA, NA, NA, NA + ) + + expect_equal(results_estimation$partially_delayed_observations, + reference_partially_delayed_observations_values, + tolerance = 1E-1 + ) + expect_equal(results_estimation$CI_up_combined_deconvolved_incidence, + reference_CI_up_combined_deconvolved_incidence_values, + tolerance = 1E-1 + ) + expect_equal(results_estimation$Re_estimate, reference_R_mean_values, tolerance = 1E-1) + expect_equal(results_estimation$CI_up_Re_estimate, reference_CI_up_R_mean, tolerance = 1E-1) +}) + +test_that(""get_block_bootstrapped_estimate consistently combines HPDs with bootstrap CIs"", { + skip_on_cran() + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 350, 403, 505, 668, 873, 987, 1050, 1268, 1490, 1760 + ) + + shape_incubation <- 2 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 3 + scale_onset_to_report <- 1.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 
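+    # number of time steps in each EpiEstim sliding window: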
3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"", + combine_bootstrap_and_estimation_uncertainties = TRUE, + output_Re_only = FALSE + ) + + simplified_estimates <- get_block_bootstrapped_estimate( + incidence_data = toy_incidence_data, + N_bootstrap_replicates = 100, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + uncertainty_summary_method = ""original estimate - CI from bootstrap estimates"", + delay = list(delay_incubation, delay_onset_to_report), + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + minimum_cumul_incidence = 10, + mean_Re_prior = 1, + ref_date = as.Date(""2020-02-04""), + time_step = ""day"", + combine_bootstrap_and_estimation_uncertainties = TRUE, + output_Re_only = TRUE + ) + + reference_CI_down <- c( + NA, NA, NA, NA, NA, NA, 2.51, 2.37, 2.28, + 2.2, 2.12, 2, 1.91, 1.83, 1.76, 1.67, 1.58, + 1.49, 1.43, 1.44, 1.49, 1.55, 1.59, 1.61, + 1.63, 1.63, 1.61, 1.58, 1.54, 1.5, 1.46, + 1.43, 1.41, 1.38, NA, NA, NA, NA, NA, NA + ) + + pretty_reference_CI_down <- c( + 2.51, 2.37, 2.28, + 2.2, 2.12, 2, 1.91, 1.83, 1.76, 1.67, + 1.58, 1.49, 1.43, 1.44, 1.49, 1.55, + 1.59, 1.61, 1.63, 1.63, 1.61, 1.58, + 1.54, 1.5, 1.46, 1.43, 1.41, 1.38 + ) + + reference_bootstrap_CI_up <- c( + NA, NA, NA, NA, NA, NA, 3.27, 3.08, 2.93, + 2.82, 2.72, 2.61, 2.47, 2.31, 2.16, 2.05, + 1.97, 1.92, 1.89, 1.89, 1.93, 1.99, 2.05, + 2.07, 2.08, 2.07, 2.03, 1.96, 1.87, 1.79, + 1.71, 1.64, 1.59, 1.55, NA, NA, NA, NA, NA, NA + ) + + reference_highHPD <- c( + NA, NA, NA, NA, NA, NA, 3.67, 3.33, + 3.1, 2.91, 2.74, 2.61, 2.47, + 2.31, 2.16, 2.05, 1.97, 1.92, 1.89, + 1.89, 1.93, 1.99, 2.05, 2.07, 2.08, + 2.07, 2.03, 1.96, 1.87, 1.79, 1.71, + 1.64, 1.59, 1.55, NA, NA, NA, NA, NA, NA + ) + + reference_Re_estimate <- c( + NA, NA, NA, NA, NA, NA, 3.06, 2.83, 2.67, + 2.54, 2.42, 2.31, 2.19, 2.07, 1.96, 1.86, + 1.78, 1.71, 1.66, 1.67, 1.71, 1.77, 1.82, + 1.84, 1.86, 1.85, 1.82, 1.77, 1.7, 1.64, 1.58, + 1.54, 1.5, 1.47, NA, NA, NA, NA, NA, NA + ) + + pretty_reference_Re_estimate <- c( + 3.06, 2.83, 2.67, + 2.54, 2.42, 2.31, 2.19, 2.07, 1.96, 1.86, + 1.78, 1.71, 1.66, 1.67, 1.71, 1.77, 1.82, + 1.84, 1.86, 1.85, 1.82, 1.77, 1.7, 1.64, 1.58, + 1.54, 1.5, 1.47 + ) + + expect_equal(simplified_estimates$CI_down_Re_estimate, pretty_reference_CI_down, tolerance = 1E-1) + expect_equal(estimates$CI_down_Re_estimate, reference_CI_down, tolerance = 1E-1) + expect_equal(estimates$bootstrapped_CI_up_Re_estimate, reference_bootstrap_CI_up, tolerance = 1E-1) + expect_equal(estimates$Re_highHPD, reference_highHPD, tolerance = 1E-1) + expect_equal(estimates$Re_estimate, reference_Re_estimate, tolerance = 1E-1) + expect_equal(simplified_estimates$Re_estimate, pretty_reference_Re_estimate, tolerance = 1E-1) +}) + +test_that(""get_bootstrapped_estimate_from_combined_observations consistently combines HPDs with bootstrap CIs"", { + skip_on_cran() + toy_onset_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 45, 32, 11, 5, 4, 0, 1, 2, 0 + ) + + toy_case_confirmation_data <- c( + 11, 12, 21, 23, 2, 14, 49, 61, 65, 45, 66, 45, + 40, 8, 61, 38, 1, 3, 4, 5, 56, 3, 45, 66, 12, 52, 27, 3, 54, + 120, 150, 230, 400, 487, 496, 602, 893, 1020, 1250, + 1400, 
1746, 2190, 2567, 3498, 4192, 6432 + ) + + shape_incubation <- 1 + scale_incubation <- 1.2 + delay_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + shape_onset_to_report <- 4 + scale_onset_to_report <- 2.3 + delay_onset_to_report <- list(name = ""gamma"", shape = shape_onset_to_report, scale = scale_onset_to_report) + + results_estimation <- get_bootstrapped_estimates_from_combined_observations( + partially_delayed_incidence = toy_onset_data, + fully_delayed_incidence = toy_case_confirmation_data, + smoothing_method = ""LOESS"", + deconvolution_method = ""Richardson-Lucy delay distribution"", + estimation_method = ""EpiEstim sliding window"", + bootstrapping_method = ""non-parametric block boostrap"", + uncertainty_summary_method = ""bagged mean - CI from bootstrap estimates"", + N_bootstrap_replicates = 100, + delay_until_partial = delay_incubation, + delay_until_final_report = delay_onset_to_report, + partial_observation_requires_full_observation = TRUE, + ref_date = as.Date(""2021-03-24""), + time_step = ""day"", + minimum_cumul_incidence = 0, + data_points_incl = 21, + degree = 1, + combine_bootstrap_and_estimation_uncertainties = TRUE, + output_Re_only = TRUE + ) + + reference_R_mean_values <- c( + 2.13, 1.93, 1.84, 1.81, 1.81, 1.82, + 1.82, 1.82, 1.81, 1.8, + 1.78, 1.76, 1.73, 1.7, 1.67, 1.65, 1.62, + 1.63, 1.7, 1.85, 2.02, 2.22, 2.45, 2.68, + 2.82, 2.81, 2.64, 2.39, 2.14, 1.94, + 1.79 + ) + + reference_CI_down_R_mean <- c( + 1.64, 1.48, 1.43, 1.43, + 1.45, 1.49, 1.52, 1.55, 1.57, 1.58, 1.59, 1.58, + 1.57, 1.52, 1.47, 1.43, 1.35, 1.28, 1.27, 1.35, + 1.49, 1.7, 1.93, 1.92, 1.68, 1.46, 1.4, 1.44, + 1.48, 1.47, 1.42 + ) + + reference_CI_up_R_mean <- c( + 2.42, 2.21, 2.1, + 2.05, 2.03, 2.03, 2.02, 2.01, 1.98, 1.96, + 1.93, 1.91, 1.9, 1.89, 1.87, 1.87, 1.89, + 1.99, 2.14, 2.34, 2.55, 2.74, 2.98, 3.44, + 3.97, 4.16, 3.88, 3.34, 2.81, 2.41, + 2.15 + ) + + expect_equal(results_estimation$Re_estimate, reference_R_mean_values, tolerance = 1E-1) + expect_equal(results_estimation$CI_down_Re_estimate, reference_CI_down_R_mean, tolerance = 1E-1) + expect_equal(results_estimation$CI_up_Re_estimate, reference_CI_up_R_mean, tolerance = 1E-1) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-utils-empirical_delays.R",".R","4597","116","test_that("".get_matrix_from_empirical_delay_distr returns valid output"", { + # First toy data test + ref_date <- as.Date(""2020-03-01"") + time_series_length <- 100 + + report_delays <- sample(c(0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 9, 10), 1000, replace = T) + event_dates <- sample(seq.Date(from = ref_date, length.out = time_series_length, by = ""day""), 1000, replace = T) + + empirical_delay_data <- tibble::tibble( + event_date = event_dates, + report_delay = report_delays + ) %>% + dplyr::arrange(event_date) + + empirical_matrix <- get_matrix_from_empirical_delay_distr( + empirical_delays = empirical_delay_data, + ref_date = ref_date, + n_report_time_steps = 90, + time_step = ""day"", + min_number_cases = 10, + upper_quantile_threshold = 0.99, + fit = ""none"" + ) + + expect_delay_matrix_sums_lte_1(empirical_matrix, full_cols = 50) + + # Second toy data test + ref_date <- as.Date(""2020-04-01"") + n_days <- 50 + delay_increase <- 1.5 + shape_initial_delay <- 6 + scale_initial_delay <- 1.5 + distribution_initial_delay <- list(name = ""gamma"", shape = shape_initial_delay, scale = scale_initial_delay) + seed <- 734 + + + generated_empirical_delays <- .generate_delay_data( + 
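+    # .generate_delay_data is an internal helper that simulates line-list delay
+    # observations; the mean delay drifts upward by ratio_delay_end_to_start
+    # over the course of the time series: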
origin_date = ref_date, + n_time_steps = n_days, + ratio_delay_end_to_start = 1.5, + distribution_initial_delay = distribution_initial_delay, + seed = seed + ) + + empirical_delays_matrix <- get_matrix_from_empirical_delay_distr( + empirical_delays = generated_empirical_delays, + ref_date = ref_date, + n_report_time_steps = 50, + fit = ""none"", + min_number_cases = 5 + ) + + + expect_delay_matrix_sums_lte_1(empirical_delays_matrix, full_cols = 20) +}) + +test_that("".get_matrix_from_empirical_delay_distr handles returning data over a full number of weeks"", { + # First toy data test + ref_date <- as.Date(""2020-03-01"") + time_series_length <- 100 + + report_delays <- sample(c(0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7, 8, 9, 10), 1000, replace = T) + event_dates <- sample(seq.Date(from = ref_date, length.out = time_series_length, by = ""day""), 1000, replace = T) + + empirical_delay_data <- tibble::tibble( + event_date = event_dates, + report_delay = report_delays + ) %>% + dplyr::arrange(event_date) + + empirical_matrix <- get_matrix_from_empirical_delay_distr( + empirical_delays = empirical_delay_data, + ref_date = ref_date, + n_report_time_steps = 90, + time_step = ""day"", + min_number_cases = 10, + upper_quantile_threshold = 0.99, + fit = ""none"", + num_steps_in_a_unit = 7 + ) + + expect_delay_matrix_sums_lte_1(empirical_matrix, full_cols = 50) +}) + +test_that("".get_matrix_from_empirical_delay_distr returns a matrix with the expected distributions when using fit = gamma"", { + skip_on_cran() + nr_distribution_samples <- 500 + time_steps <- 30 + + # Testing delay matrix with data sampled from constant gamma distribution + original_distribution_shapes <- rep(6, time_steps) + original_distribution_scales <- rep(5, time_steps) + set.seed(1) + result <- .delay_distribution_matrix_rmse_compute(original_distribution_shapes, original_distribution_scales, nr_distribution_samples) + expect_equal(max(result$shape_rmse, 0.07829915), 0.07829915, tolerance = 1E-2) + expect_equal(max(result$scale_rmse, 0.05670633), 0.05670633, tolerance = 1E-2) + + + # Testing delay matrix with data sampled from two different gamma distributions + original_distribution_shapes <- c(rep(3.5, time_steps / 2), rep(6.5, time_steps / 2)) + original_distribution_scales <- c(rep(2, time_steps / 2), rep(3, time_steps / 2)) + set.seed(1) + result <- .delay_distribution_matrix_rmse_compute(original_distribution_shapes, original_distribution_scales, nr_distribution_samples) + expect_equal(max(result$shape_rmse, 0.3193425), 0.3193425, tolerance = 1E-2) # the RMSE gets lower with more time_steps; + expect_equal(max(result$scale_rmse, 0.3808837), 0.3808837, tolerance = 1E-2) # kept the lower time_steps value to reduce running time + + + # Testing delay matrix with data sampled from a different gamma distribution for each timestep + original_distribution_shapes <- sample(seq(3.9, 7.1, by = 0.1), time_steps, replace = TRUE) + original_distribution_scales <- sample(seq(2.9, 6.1, by = 0.1), time_steps, replace = TRUE) + set.seed(1) + result <- .delay_distribution_matrix_rmse_compute(original_distribution_shapes, original_distribution_scales, nr_distribution_samples) + expect_equal(max(result$shape_rmse, 0.2932824), 0.2932824, tolerance = 1E-2) + expect_equal(max(result$scale_rmse, 0.2883487), 0.2883487, tolerance = 1E-2) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-estimate_Re.R",".R","4898","156","test_that(""estimate_Re yields consistent results on a toy example"", { + incidence_data 
<- c( + 1, 2, 12, 32, 34, 45, 87, 134, 230, 234, 222, 210, 190, 259, + 351, 453, 593, 603, 407, 348, 304, 292, 256, 229, + 132, 98, 86, 54, 39, 23, 3, 2, 12, 14 + ) + + estimated_Re <- estimate_Re( + incidence_data = incidence_data, + estimation_method = ""EpiEstim sliding window"", + minimum_cumul_incidence = 0, + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1 + ) + + reference_values <- c( + 7.01, 6.18, 6.4, 5.32, 3.83, 2.46, 1.67, 1.42, 1.51, 1.84, + 2.22, 2.31, 1.89, 1.32, 0.89, 0.73, 0.65, 0.62, 0.53, 0.43, 0.33, 0.29, + 0.26, 0.2, 0.14, 0.08, 0.06, 0.14 + ) + reference_offset <- 6 + + expect_equal(.get_values(estimated_Re), reference_values, tolerance = 1E-2) + expect_identical(.get_offset(estimated_Re), reference_offset) +}) + +test_that(""estimate_Re is consistent with piecewise estimates"", { + incidence_data <- c( + 1, 2, 12, 32, 34, 45, 87, 134, 230, 234, 222, 210, 190, 259, + 351, 453, 593, 603, 407, 348, 304, 292, 256, 229, + 132, 98, 86, 54, 39, 23, 3, 2, 12, 14 + ) + + piecewise_estimated_Re <- estimate_Re( + incidence_data = incidence_data, + estimation_method = ""EpiEstim piecewise constant"", + minimum_cumul_incidence = 0, + interval_length = 7, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1 + ) + + reference_piecewise_values <- c( + 7.46, 7.46, 7.46, 7.46, 7.46, 7.46, 7.46, + 2.03, 2.03, 2.03, 2.03, 2.03, 2.03, 2.03, + 1.28, 1.28, 1.28, 1.28, 1.28, 1.28, 1.28, + 0.41, 0.41, 0.41, 0.41, 0.41, 0.41, 0.41, + 0.12, 0.12, 0.12, 0.12, 0.12 + ) + + reference_piecewise_offset <- 1 + + expect_equal(.get_values(piecewise_estimated_Re), reference_piecewise_values, tolerance = 1E-2) + expect_identical(.get_offset(piecewise_estimated_Re), reference_piecewise_offset) + + piecewise_estimated_Re <- estimate_Re( + incidence_data = incidence_data, + estimation_method = ""EpiEstim piecewise constant"", + minimum_cumul_incidence = 0, + interval_ends = c(9, -2, 0, 1, 14, 49, 78, 34), + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1 + ) + + reference_piecewise_values <- c( + 7.1, 7.1, 7.1, 7.1, 7.1, 7.1, 7.1, 7.1, + 1.83, 1.83, 1.83, 1.83, 1.83, 0.83, 0.83, + 0.83, 0.83, 0.83, 0.83, 0.83, 0.83, 0.83, + 0.83, 0.83, 0.83, 0.83, 0.83, 0.83, 0.83, + 0.83, 0.83, 0.83, 0.83 + ) + + reference_piecewise_offset <- 1 + + expect_equal(.get_values(piecewise_estimated_Re), reference_piecewise_values, tolerance = 1E-2) + expect_identical(.get_offset(piecewise_estimated_Re), reference_piecewise_offset) +}) + +test_that(""estimate_Re outputs HPD intervals when asked"", { + incidence_data <- c( + 1, 2, 12, 32, 34, 45, 87, 134, 230, 234, 222, 210, 190, 259, + 351, 453, 593, 603, 407, 348, 304, 292, 256, 229, + 132, 98, 86, 54, 39, 23, 3, 2, 12, 14 + ) + + estimated_Re <- estimate_Re( + incidence_data = incidence_data, + estimation_method = ""EpiEstim sliding window"", + minimum_cumul_incidence = 0, + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1, + output_HPD = TRUE, + simplify_output = TRUE + ) + + reference_index <- c( + 6, 7, 8, 9, 10, 11, 12, 13, + 14, 15, 16, 17, 18, 19, 20, 21, 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33 + ) + + reference_values_lowHPD <- c( + 5.99, 5.46, 5.82, + 4.91, 3.55, 2.28, 1.54, 1.31, 1.41, + 1.73, 2.11, 2.2, 1.79, 1.25, 0.83, + 0.68, 0.61, 0.58, 0.49, 0.39, 0.3, + 0.25, 0.22, 0.17, 0.11, 0.05, 0.04, 0.09 + ) + + expect_identical(estimated_Re$idx, reference_index) + expect_equal(estimated_Re$Re_lowHPD, 
reference_values_lowHPD, tolerance = 1E-2) +}) + + +test_that(""estimate_Re yields consistent results with imports"", { + incidence_data <- c( + 0, 0, 0, 1, 2, 12, 32, 34, 45, 87, 134, 230, 234, 222, 210, 190, 259, + 351, 453, 593, 603, 407, 348, 304, 292, 256, 229, + 132, 98, 86, 54, 39, 23, 3, 2, 12, 14 + ) + + import_incidence_data <- c( + 2, 1, 0, 1, 2, 4, 0, 0, 0, 0, 0, 0, 0, 0, 2, 3, 2, 1, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 21, 2, 0 + ) + + estimated_Re <- estimate_Re( + incidence_data = incidence_data, + import_incidence_input = import_incidence_data, + estimation_method = ""EpiEstim sliding window"", + minimum_cumul_incidence = 0, + estimation_window = 3, + mean_serial_interval = 4.8, + std_serial_interval = 2.3, + mean_Re_prior = 1 + ) + + reference_values <- c( + 15.34, 12.47, 7.81, 5.97, 5.67, 6.14, + 5.22, 3.8, 2.45, 1.66, 1.42, 1.51, 1.83, + 2.21, 2.3, 1.88, 1.32, 0.89, 0.73, 0.65, + 0.62, 0.53, 0.43, 0.33, 0.29, 0.25, 0.2, + 0.14, 0.08, 0.06, 0.14 + ) + reference_offset <- 6 + + expect_equal(.get_values(estimated_Re), reference_values, tolerance = 1E-2) + expect_identical(.get_offset(estimated_Re), reference_offset) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-bootstrap.R",".R","1249","31","test_that(""get_bootstrap_replicate outputs difference values with same median and bounds as original difference values"", { + skip_on_cran() + expect_bootstrapped_diff_bounded_by_original_diff <- function(...) { + data_points_incl <- 21 + degree <- 1 + original_values <- sample.int(1000, size = 10000, replace = TRUE) + + log_original <- log(original_values + 1) + smoothed_log <- smooth_incidence(log_original, smoothing_method = ""LOESS"", data_points_incl = data_points_incl) + + diff_smoothed_original <- log_original - smoothed_log + + bootstrap_replicate <- get_bootstrap_replicate( + incidence_data = original_values, + bootstrapping_method = ""non-parametric block boostrap"", + data_points_incl = data_points_incl, + degree = degree, + round_incidence = FALSE + ) + + log_bootstrap <- log(bootstrap_replicate + 1) + diff_smoothed_bootstrap <- log_bootstrap - smoothed_log + + expect_equal(median(diff_smoothed_bootstrap), median(diff_smoothed_original), tolerance = 0.1) + expect_gte(min(diff_smoothed_bootstrap), min(diff_smoothed_original) - 1E-1) + expect_lte(max(diff_smoothed_bootstrap), max(diff_smoothed_original) + 1E-1) + } + + sapply(1:10, expect_bootstrapped_diff_bounded_by_original_diff) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-utils-convolution.R",".R","6584","236","test_that("".convolve_delay_distribution_vectors returns a vector whose elements sum up to 1"", { + N <- 100 + shapes <- stats::runif(2 * N, min = 0, max = 10) + scales <- stats::runif(2 * N, min = 0, max = 10) + + distribution_list <- lapply(1:length(shapes), function(i) { + return(list(name = ""gamma"", shape = shapes[i], scale = scales[i])) + }) + + delay_distribution_vectors <- lapply(distribution_list, function(x) { + build_delay_distribution(x, + max_quantile = 0.9999 + ) + }) + + convolved_distribution_vectors <- sapply(1:N, function(x) { + .convolve_delay_distribution_vectors( + delay_distribution_vectors[[2 * x]], + delay_distribution_vectors[[2 * x - 1]] + ) + }) + + max_difference_to_1 <- max(abs(sapply(delay_distribution_vectors, sum) - 1)) + + expect_equal(max_difference_to_1, 0, tolerance = 1E-4) +}) + +test_that("".convolve_delay_distribution_vectors returns correct output on a simple example"", { + delay_distribution_vector_1 <- c(0, 0.25, 0.1, 0.65) + 
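+  # The reference below is the plain discrete convolution
+  # c[k] = sum over i of v1[i] * v2[k - i], e.g.
+  # c[3] = 0 * 0.3 + 0.25 * 0.3 + 0.1 * 0.2 + 0.65 * 0.2 = 0.225.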
delay_distribution_vector_2 <- c(0.2, 0.2, 0.3, 0.3) + ref_convolved_output <- c(0, 0.05, 0.07, 0.225, 0.235, 0.225, 0.195, 0) + + convolved_output <- .convolve_delay_distribution_vectors( + delay_distribution_vector_1, + delay_distribution_vector_2 + ) + + expect_equal(convolved_output, ref_convolved_output, tolerance = 1E-4) +}) + +test_that(""convolve_delays returns same output as empirical method of convoluting gammas"", { + shape_incubation <- 3.2 + scale_incubation <- 1.3 + + incubation_delay <- list( + name = ""gamma"", + shape = shape_incubation, + scale = scale_incubation + ) + + shape_onset_to_report <- 2.7 + scale_onset_to_report <- 1.6 + + onset_to_report_delay <- list( + name = ""gamma"", + shape = shape_onset_to_report, + scale = scale_onset_to_report + ) + + convolved_output <- convolve_delays( + delays = list(incubation_delay, onset_to_report_delay) + ) + + empirical_convolution_result <- c( + 0, 9e-04, 0.00947, 0.03214, 0.06438, 0.09523, 0.11566, + 0.12255, 0.11742, 0.1043, 0.08721, 0.06951, 0.05329, + 0.03951, 0.02841, 0.01991, 0.01371, 0.00925, 0.00616, + 0.00402, 0.0026, 0.00165, 0.00105, 0.00065, 0.00039, + 0.00025, 0.00015, 9e-05, 6e-05, 3e-05, 2e-05, 1e-05, + 1e-05, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + ) + + padded_convolved_output <- c(convolved_output, rep(0, times = length(empirical_convolution_result) - length(convolved_output))) + + absolute_diff <- abs(padded_convolved_output - empirical_convolution_result) + + expect_equal(max(absolute_diff), 0, tolerance = 1E-3) +}) + +test_that("".convolve_delay_distribution_vector_with_matrix returns correct output on a simple example"", { + vector_a <- c(0.2, 0.3, 0.5) + matrix_b <- matrix(c( + 0.1, 0, 0, + 0.3, 0.2, 0, + 0.6, 0.4, 0.15 + ), + nrow = 3, + ncol = 3, + byrow = TRUE + ) + + ref_convolved_matrix_vector_first <- matrix(c( + 0.02, 0, 0, 0, 0, + 0.09, 0.02, 0, 0, 0, + 0.26, 0.09, 0.02, 0, 0, + 0.33, 0.31, 0.12, 0.04, 0, + 0.30, 0.38, 0.315, 0.125, 0.03 + ), + nrow = 5, + ncol = 5, + byrow = TRUE + ) + + ref_convolved_matrix_vector_last <- matrix(c( + 0.02, 0, 0, + 0.09, 0.04, 0, + 0.26, 0.14, 0.03 + ), + nrow = 3, + ncol = 3, + byrow = TRUE + ) + + convolved_matrix_vector_first <- .convolve_delay_distribution_vector_with_matrix( + vector_a = vector_a, + matrix_b = matrix_b, + vector_first = T + ) + + convolved_matrix_vector_last <- .convolve_delay_distribution_vector_with_matrix( + vector_a = vector_a, + matrix_b = matrix_b, + vector_first = F + ) + + + expect_equal(convolved_matrix_vector_first, ref_convolved_matrix_vector_first) + expect_equal(convolved_matrix_vector_last, ref_convolved_matrix_vector_last) +}) + +test_that("".convolve_delay_distribution_vector_with_matrix returns valid output"", { + vector_a <- c(0.2, 0.3, 0.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) + vector_b <- c(0.3, 0.13, 0.42, 0.14, 0.01) + matrix_b <- .get_delay_matrix_from_delay_distributions(vector_b, N = 20) + + convolved_matrix_vector_first <- .convolve_delay_distribution_vector_with_matrix( + vector_a = vector_a, + matrix_b = matrix_b, + vector_first = T + ) + + convolved_matrix_vector_last <- .convolve_delay_distribution_vector_with_matrix( + vector_a = vector_a, + matrix_b = matrix_b, + vector_first = F + ) + + expect_delay_matrix_sums_lte_1(convolved_matrix_vector_first, full_cols = 10) + expect_delay_matrix_sums_lte_1(convolved_matrix_vector_last, full_cols = 10) +}) + +test_that("".convolve_delay_distribution_matrices returns valid output"", { + skip(""Function is not ready yet."") + + vector_a <- c(0.21, 0.14, 
0.17, 0.09, 0.01, 0.27, 0.11) + vector_b <- c(0.3, 0.13, 0.42, 0.14, 0.01) + matrix_a <- .get_delay_matrix_from_delay_distributions(vector_a, N = 30) + matrix_b <- .get_delay_matrix_from_delay_distributions(vector_b, N = 30) + + convolved_matrix_ab <- .convolve_delay_distribution_matrices( + matrix_a = matrix_a, + matrix_b = matrix_b + ) + + convolved_matrix_ba <- .convolve_delay_distribution_matrices( + matrix_a = matrix_b, + matrix_b = matrix_a + ) + + expect_delay_matrix_sums_lte_1(convolved_matrix_ab, full_cols = 10) + expect_delay_matrix_sums_lte_1(convolved_matrix_ba, full_cols = 10) +}) + +test_that("".convolve_delays is consistent on convolving several vectors and matrices"", { + vector_a <- c(0.2, 0.3, 0.5) + matrix_b <- matrix(c( + 0.1, 0, 0, + 0.3, 0.2, 0, + 0.6, 0.4, 0.15 + ), + nrow = 3, + ncol = 3, + byrow = TRUE + ) + vector_c <- c(0.2, 0.2, 0.3, 0.3) + + convolved_output <- convolve_delays( + delays = list(vector_a, matrix_b, vector_c) + ) + + ref_result <- matrix( + c( + 0.004, 0.000, 0.000, 0.000, 0.000, + 0.022, 0.004, 0.000, 0.000, 0.000, + 0.076, 0.022, 0.004, 0.000, 0.000, + 0.151, 0.086, 0.028, 0.008, 0.000, + 0.231, 0.171, 0.093, 0.033, 0.006 + ), + nrow = 5, + ncol = 5, + byrow = TRUE + ) + + expect_equal(convolved_output, ref_result, tolerance = 1E-3) +}) + + +test_that("".convolve_delays can work with a single delay as input"", { + vector_a <- c(0.2, 0.3, 0.5) + + convolved_output <- convolve_delays( + delays = vector_a + ) + + ref_result <- c(0.2, 0.3, 0.5) + + expect_equal(convolved_output, ref_result, tolerance = 1E-3) + + shape_incubation <- 3.2 + scale_incubation <- 1.3 + + incubation_delay <- list( + name = ""gamma"", + shape = shape_incubation, + scale = scale_incubation + ) + + convolved_output <- convolve_delays( + delays = incubation_delay + ) + + ref_result <- c(0, 0.08, 0.17, 0.2, 0.17, 0.13, 0.09, 0.06, 0.04, 0.02, 0.01, 0.01, 0, 0, 0, 0, 0) + + expect_equal(convolved_output, ref_result, tolerance = 5E-2) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-smooth.R",".R","2431","55","# TODO test LOESS function +# 1) always positive values +# 2) normalized total +# 3) constant output if constant input +# 4) add a ""reference"" test for a more complicated use-case +# 5) test that min and max of smoothed values are bounded by min and max of noisy values + +smoothing_methods_tested <- c(""LOESS"") + +test_that(""smooth_incidence output values are positive "", { + random_values <- sample.int(1000, size = 100, replace = TRUE) + + sapply(smoothing_methods_tested, function(x) { + smoothed_values <- .get_values(smooth_incidence(random_values, smoothing_method = x)) + expect_gte(min(smoothed_values), 0) + }) +}) + +test_that(""smooth_incidence output values sum up to the total of the original incidence "", { + random_values <- sample.int(1000, size = 100, replace = TRUE) + + sapply(smoothing_methods_tested, function(x) { + smoothed_values <- .get_values(smooth_incidence(random_values, smoothing_method = x)) + expect_equal(sum(smoothed_values), sum(random_values)) + }) +}) + +test_that(""smooth_incidence output values are constant if input is constant "", { + # TODO unskip test when added way to deal with left-truncated incidence input + skip(""Left-truncated incidence input cannot be dealt with yet."") + constant_values <- rep(5.3, times = 100) + + sapply(smoothing_methods_tested, function(x) { + smoothed_values <- .get_values(smooth_incidence(constant_values, smoothing_method = x)) + expect_equal(smoothed_values, constant_values) + }) +}) + 
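+# For intuition: a rough base-R analogue of the ""LOESS"" smoothing exercised
+# below (NOT estimateR's exact implementation, which for instance also rescales
+# the result so the incidence total is preserved, as tested above) would be:
+#   fit <- stats::loess(y ~ x, data = data.frame(x = seq_along(noisy), y = noisy),
+#                       degree = 1, span = 21 / length(noisy))
+#   smoothed <- predict(fit) * sum(noisy) / sum(predict(fit))
+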
+test_that(""smooth_incidence output values are bounded by bounds of noisy values"", { + random_values <- sample.int(1000, size = 100, replace = TRUE) + + sapply(smoothing_methods_tested, function(x) { + smoothed_values <- .get_values(smooth_incidence(random_values, smoothing_method = x)) + expect_gte(min(smoothed_values), min(random_values)) + expect_lte(max(smoothed_values), max(random_values)) + }) +}) + +test_that(""smooth_incidence output stays consistent for LOESS method"", { + noisy_values <- c(272, 78, 859, 642, 411, 612, 192, 262, 399, 371, 69, 80, 221, 945, 198, 896, 705, 155, 498, 795) + ref_smoothed_values <- c(220.682, 245.42, 271.962, 299.524, 323.565, 345.538, 368.764, 391.508, 412.034, 431.246, 450.882, 470.464, 489.509, 507.909, 526.037, 544.136, 562.446, 580.942, 599.448, 617.983) + smoothed_values <- .get_values(smooth_incidence(noisy_values, smoothing_method = ""LOESS"")) + expect_equal(smoothed_values, ref_smoothed_values, tolerance = 1E-3) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-utils-nowcast.R",".R","722","20","test_that(""nowcast() is correct on a simple example"", { + toy_incidence <- c(9, 3, 4, 7, 8, 0, 2, 1, 0) + toy_delay <- c(0.05, 0.05, 0.3, 0.2, 0.1, 0.3) + + corrected_result <- nowcast(toy_incidence, toy_delay, cutoff_observation_probability = 0.11) + + ref_values <- c(9, 3, 4, 7, 11.429, 0, 5) + expect_equal(corrected_result$values, ref_values, tolerance = 0.01) +}) + + +test_that(""nowcast() is correct with large gap_to_present"", { + toy_incidence <- c(9, 3, 4, 7, 8, 0, 2, 1, 0) + toy_delay <- c(0.05, 0.05, 0.3, 0.2, 0.1, 0.3) + + corrected_result <- nowcast(toy_incidence, toy_delay, cutoff_observation_probability = 0.11, gap_to_present = 10) + + expect_equal(corrected_result$values, toy_incidence, tolerance = 0.01) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-utils-distribution.R",".R","1400","44","# TODO TEST that: +# 1) build_delay_distribution throws error when unsupported distribution_type is thrown in +# and when unsuitable parameter values are thrown in (not numeric, or negative values for instance) + +test_that(""build_delay_distribution returns a vector whose elements sum up to 1"", { + N <- 100 + shapes <- stats::runif(N, min = 0, max = 10) + scales <- stats::runif(N, min = 0, max = 10) + + distribution_list <- lapply(1:length(shapes), function(i) { + return(list(name = ""gamma"", shape = shapes[i], scale = scales[i])) + }) + + delay_distribution_vectors <- lapply(distribution_list, function(x) { + build_delay_distribution(x, + max_quantile = 0.9999 + ) + }) + + max_difference_to_1 <- max(abs(sapply(delay_distribution_vectors, sum) - 1)) + + expect_equal(max_difference_to_1, 0, tolerance = 1E-4) +}) + + +test_that("".get_delay_matrix_from_delay_distributions returns valid output"", { + N <- 100 + + shapes <- stats::runif(N, min = 0, max = 10) + scales <- stats::runif(N, min = 0, max = 10) + + distribution_list <- lapply(1:length(shapes), function(i) { + return(list(name = ""gamma"", shape = shapes[i], scale = scales[i])) + }) + + matrix_result <- .get_delay_matrix_from_delay_distributions( + distributions = distribution_list, + max_quantile = 0.999 + ) + + # Check that all columns sum up to less than one. 
+ expect_delay_matrix_sums_lte_1(matrix_result, full_cols = 0) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-utils-output.R",".R","593","10","test_that("".get_module_output() deals with well-formatted input"", { + results <- c(1, 2, 3, -2, 0, 0) + input_1 <- list(values = c(1, 1, 1, 1, 1), index_offset = 0) + input_2 <- list(values = c(1, 1, 1, 1, 1), index_offset = -1) + + expect_identical(.get_module_output(results, input_1$index_offset), list(values = results, index_offset = 0)) + expect_identical(.get_module_output(results, input_2$index_offset), list(values = results, index_offset = -1)) + expect_identical(.get_module_output(results, input_2$index_offset, additional_offset = 3), list(values = results, index_offset = 2)) +}) +","R" +"Nowcasting","covid-19-Re/estimateR","tests/testthat/test-deconvolve.R",".R","6396","183","test_that(""deconvolve_incidence yields consistent results on a toy constant-delay example"", { + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + delay_distribution <- c( + 0, 0.015, 0.09, 0.168, + 0.195, 0.176, 0.135, 0.091, 0.057, 0.034, + 0.019, 0.01, 0.005, 0.003, + 0.001, 0.001 + ) + + deconvolved_incidence <- deconvolve_incidence( + incidence_data = toy_incidence_data, + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay = delay_distribution, + threshold_chi_squared = 1, + max_iterations = 100 + ) + + reference_values <- c(4.9,6.6,8.3,10.9,14.4,18.8,27,36.5, + 47.4,60.3,75.1,91.9,110.9,133,157.6, + 185.9,216.9,246.6,272.5,299.2,320.7, + 333.6,343,345.4,336.7,323.2,306.2,283, + 261.1,239.6,216.6,193.9,171.2,145.3, + 117.2,89.1,63.4) + reference_offset <- -5 + + expect_lte(max(abs(.get_values(deconvolved_incidence) - reference_values)), expected = 1) + expect_identical(.get_offset(deconvolved_incidence), reference_offset) +}) + +test_that(""deconvolve_incidence yields consistent results on a toy moving-through-time example"", { + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + ref_date <- as.Date(""2020-04-01"") + n_days <- 50 + delay_increase <- 1.5 + shape_initial_delay <- 6 + scale_initial_delay <- 1.5 + distribution_initial_delay <- list(name = ""gamma"", shape = shape_initial_delay, scale = scale_initial_delay) + seed <- 734 + + generated_empirical_delays <- .generate_delay_data( + origin_date = ref_date, + n_time_steps = n_days, + ratio_delay_end_to_start = 1.5, + distribution_initial_delay = distribution_initial_delay, + seed = seed + ) + + shape_incubation <- 3.2 + scale_incubation <- 1.3 + distribution_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + + deconvolved_incidence <- deconvolve_incidence( + incidence_data = toy_incidence_data, + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay = list( + distribution_incubation, + generated_empirical_delays + ), + ref_date = ref_date, + min_number_cases = 20, + threshold_chi_squared = 1, + max_iterations = 100, + fit = ""none"" + ) + + reference_values <- c(3.6,4.9,6.2,8.2,11,14.8, + 21.8,30.2,40.4,53.2,69,87.6, + 108.8,133.8,162.3,195.9,234.4, + 273.5,309,344.4,372.2, + 388,396.9,394.8,377.6,353.2, + 323.9,287.9,254.6, + 223.8,194.3,167.3,141.2, + 113.8,87.6,64.1,43.9) + + reference_offset 
<- -11 + + expect_lte(max(abs(.get_values(deconvolved_incidence) - reference_values)), expected = 0.1) + expect_identical(.get_offset(deconvolved_incidence), reference_offset) +}) + +test_that(""deconvolve_incidence takes into account extra parameters for get_matrix_empirical_delay_distr"", { + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66 + ) + + ref_date <- as.Date(""2020-04-01"") + n_days <- 50 + delay_increase <- 1.5 + shape_initial_delay <- 6 + scale_initial_delay <- 1.5 + distribution_initial_delay <- list(name = ""gamma"", shape = shape_initial_delay, scale = scale_initial_delay) + seed <- 734 + + generated_empirical_delays <- .generate_delay_data( + origin_date = ref_date, + n_time_steps = n_days, + ratio_delay_end_to_start = 3, + distribution_initial_delay = distribution_initial_delay, + seed = seed + ) + + shape_incubation <- 3.2 + scale_incubation <- 1.3 + distribution_incubation <- list(name = ""gamma"", shape = shape_incubation, scale = scale_incubation) + fit <- ""gamma"" + min_number_cases <- 20 + + deconvolved_incidence <- deconvolve_incidence( + incidence_data = toy_incidence_data, + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay = list( + distribution_incubation, + generated_empirical_delays + ), + ref_date = ref_date, + threshold_chi_squared = 1, + max_iterations = 100, + fit = fit, + min_number_cases = min_number_cases + ) + + reference_values <- c(2.4,3.3,4.3,5.7,7.8,10.5,15.6,21.9,29.7,39.8,52.6, + 68.5,87.7,111.4,138.9,171.6,208.8,246.8,282.8,321, + 354,376.6,393.3,399.8,390.7,373.6,350.6,319,288.1, + 257.5,225.5,194.9,165.8,135.6,105.6,77.7,53.6) + + reference_offset <- -14 + + expect_lte(max(abs(.get_values(deconvolved_incidence) - reference_values)), expected = 0.1) # Work-around because weird behaviour of expect_equal() + expect_identical(.get_offset(deconvolved_incidence), reference_offset) +}) + +test_that(""deconvolve_incidence handles incidence with 0s at the end"", { + toy_incidence_data <- c( + 6, 8, 10, 13, 17, 22, 31, 41, 52, 65, 80, 97, 116, + 138, 162, 189, 218, 245, 268, 292, 311, 322, 330, + 332, 324, 312, 297, 276, 256, 236, 214, 192, 170, + 145, 118, 91, 66, 32, 14, 0, 0 + ) + + delay_distribution <- c( + 0, 0.015, 0.09, 0.168, + 0.195, 0.176, 0.135, 0.091, 0.057, 0.034, + 0.019, 0.01, 0.005, 0.003, + 0.001, 0.001 + ) + + deconvolved_incidence <- deconvolve_incidence( + incidence_data = toy_incidence_data, + deconvolution_method = ""Richardson-Lucy delay distribution"", + delay = delay_distribution, + threshold_chi_squared = 1, + max_iterations = 100 + ) + + reference_values <- c(4.8,6.4,8.1,10.7,14.1,18.4, + 26.4,35.8,46.9,60.1,74.9,91.5, + 110.1,132,156.4,184.6,216,246.4, + 273,300.4,322.3,335.8,345.5, + 347.9,339,325.1,307.4,283.6, + 261.5,240,217.3,195.1,171.8, + 144,112.1,79.1,48.9,18.2,5.2,0,0) + reference_offset <- -5 + + expect_lte(max(abs(.get_values(deconvolved_incidence) - reference_values)), expected = 1) + expect_identical(.get_offset(deconvolved_incidence), reference_offset) +}) +","R" +"Nowcasting","JasonApke/OCTANE","include/oct_bicubic.h",".h","316","6","//Definitions for bicubic +static double oct_cell( double v[4], double x); +static double oct_bicubic_cell( double p[4][4], double x, double y); +double oct_bicubic(double * input, double uu, double vv, int nx, int ny,int inout); +double oct_bicubic_float(float * input, double uu, double vv, int nx, int 
ny,int inout); +","Unknown" +"Nowcasting","JasonApke/OCTANE","include/util.h",".h","292","11","//A header containing function declarations for UTIL
+//
+
+double **dMatrix(int, int);
+double ***dImage(int, int,int);
+void free_dImage(double ***,int,int);
+void free_dMatrix(double **,int);
+void dMatrix_initzero(double **, int, int);
+float **fMatrix(int, int);
+void free_fMatrix(float **,int);
+","Unknown" +"Nowcasting","JasonApke/OCTANE","include/oct_gaussian.h",".h","245","7","//A header containing function declarations for gaussian
+
+void oct_getGaussian(double **, int,double);
+void oct_getGaussian_1D(double *, int,double);
+void oct_gaussian(double *, int,int,double);
+void oct_gaussian2(double **, int,int,double,int);
+","Unknown" +"Nowcasting","JasonApke/OCTANE","include/goesread.h",".h","1556","58","//A header containing class definitions for GOESREAD
+
+class GOESNAVVar
+{
+ public:
+ double pph,req,rpol,lam0,inverse_flattening,lat0;
+ float gipVal,xScale,xOffset,yScale,yOffset,g2xOffset,g2yOffset,fk1,fk2,bc1,bc2,lpo,kap1,radScale,radOffset;
+ float fk12,fk22,bc12,bc22,kap12,fk13,fk23,bc13,bc23,kap13;
+ float radScale2, radScale3, radOffset2,radOffset3;
+ long nx2,ny2,nx3,ny3;
+ long nx,ny,CTHx,CTHy;
+ int minXc,maxXc,minYc,maxYc,minX,minY,maxX,maxY;
+ float lat1,lon1,lon0,R;
+};
+
+class GOESVar
+{
+ //Access Specifier
+ public:
+ float *latVal;
+ float *lonVal;
+ short *x;
+ short *y;
+ short *CTP,*CTT;
+ unsigned char *CTI;
+ float *dataVal, *dataVal2, *dataVal3;
+ Image data;
+ float *dataVal2i, *dataVal3i;
+ short *occlusion;
+ short *uVal;
+ short *vVal;
+ short *uVal2;
+ short *vVal2;
+ short *cnrarr;
+ float *uPix;
+ float *vPix;
+ double *u1;
+ double *v1;
+ float *UFG;
+ float *VFG;
+ double *u2;
+ double *v2;
+ short *accel;
+ float *CTHVal;
+ float *CTTVal;
+ unsigned char *CTHInv;
+ short *dataSVal,*dataSVal2, *dataSVal3;
+ short *dataSValint,*dataSValint2,*dataSValint3;
+ float *dataSValfloat,*dataSValfloat2,*dataSValfloat3;
+ double t;
+ double tint;
+ float dT;
+ float frdt;
+ int band,band2,band3;
+ GOESNAVVar nav;
+ std::string tUnits;
+};
+","Unknown" +"Nowcasting","JasonApke/OCTANE","include/oct_bc.h",".h","250","21","template <typename T> //template parameter list restored; the original was lost in extraction
+T oct_bc(T x, int nx,bool &bc)
+{
+ bc=false;
+ T result;
+ result = x;
+ if(x < 0)
+ {
+ result = 0;
+ bc=true;
+ }
+ if(x >= nx)
+ {
+ result = nx-1;
+ bc=true;
+ }
+ return result;
+}
+
+
+","Unknown" +"Nowcasting","JasonApke/OCTANE","include/image.h",".h","443","26","#include
+
+class Image
+{
+ public:
+ float *data;
+ int nrow, ncol,nchannels;
+ Image(){
+ nrow = 0;
+ ncol = 0;
+ nchannels=0;
+ };
+ Image(int x, int y, int c){
+ nrow = x;
+ ncol = y;
+ nchannels=c;
+ };
+ void setdims(int x, int y, int c){
+ nrow = x;
+ ncol = y;
+ nchannels = c;
+ }
+
+};
+
+","Unknown" +"Nowcasting","JasonApke/OCTANE","include/zoom.h",".h","428","13","//A header containing function declarations for zoom
+//
+
+void oct_zoom_size(int,int,int&,int&,double);
+
+void oct_zoom_out(double *, double *, int, int, double,int);
+void oct_zoom_out_float(float *, float *, int, int, double,int,int);
+void oct_zoom_out_2d(double *, double **, int, int, double,int);
+
+void oct_zoom_in(double *, double *, int, int, int, int);
+void oct_zoom_in_float(float *, float *, int, int, int, int,int,int);
+
+","Unknown" +"Nowcasting","JasonApke/OCTANE","include/offlags.h",".h","3197","74","//A header containing class definitions and default settings for OF Flags
+#include <string> //header name inferred from the std::string member below; the original was lost in extraction
+
+class OFFlags
+{
+ //Access Specifier
+ public:
+ int farn; //Do farneback optical flow instead
+
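//NOTE: most int members in this class act as booleans (1 = on, 0 = off) and appear to be filled from the
+ //command line elsewhere in OCTANE; a minimal, purely hypothetical caller sketch:
+ // OFFlags args; args.farn = 0; args.pixuv = 1; //illustration only, this header sets no defaults
+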
int pixuv; //Output pixel displacement instead of UV displacement
+ int dopolar; //Do a polar grid (this is an orthonormal grid I designed in convert_dnbh5 and polar_grid_module.py)
+ int domerc; //Do a Mercator grid (this is a mercator grid that comes out of my read_alpw.py module)
+ int doahi; //do AHI data
+ int dosrsal;
+ int dososm;
+ int dofirstguess; //an argument to tell optical flow to use a first guess flow field (MUST HAVE A NETCDF OF THE SAME SIZE AS THE INPUT FILE!)
+ std::string ftype;
+ int dointerp; //the arg to tell optical flow to interpolate
+ int docorn; //An argument to return corners in the image (used for image navigation for winds products)
+ int putinterp; //an extra argument to handle the interp writing portion, not set by user
+ int interpcth; //A flag to switch CTH (or IR) between bilinear and nearest neighbor interpolation
+ //for zooming in (to visible resolution, doesn't matter for IR resolution)
+ int doinv; //an argument to read the inversion flag data out of the clavrx files, and output it with the OF file
+ int doctt; //an argument to read the cloud-top temperature flag data out of the clavrx files and output it with the OF file
+ int dozim; //a flag to turn on Zimmer et al. (2011) data term normalization
+ int oftype;
+ int doc2;
+ int doc3;
+ int ir;
+ int rad; //sosm target size
+ int srad; //sosm search radius size
+ int setdevice; //integer to set which gpu to use (useful for multi-gpu machines)
+ //Farneback Defaults
+ float fpyr_scale;
+ float flevels;
+ int fwinsize;
+ int fiterations;
+ int poly_n;
+ float poly_sigma;
+ float deltat;
+ int uif;
+ int fg; //this is the Farneback Gaussian setting, NOT do first guess
+ int doCTH;
+ //Modified Sun Defaults
+ double lambda;
+ double alpha;
+ double alpha2;
+ double lambdac;
+ double scsig;
+ double filtsigma;
+ double scaleF; //pyramid scale factor, not changeable on the command line at the moment
+ int kiters; //outer iterations or pyramid levels +1
+ int liters; //inner iterations or number of cg solving update steps per GNC level
+ int cgiters; //conjugate gradient iterations maximum, will stop when error is permissibly low
+ int miters;
+ int setnorms; //flag to set the normalization min max values
+ float NormMax; //currently not set by user, let's change that!
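+ //NOTE (inferred from oct_bandminmax and the minout/maxout values in oct_fileread.cc): NormMin/NormMax
+ //appear to bound the radiance range mapped to 0-255 brightness before optical flow, roughly
+ // brit = 255*(rad - NormMin)/(NormMax - NormMin), clamped to [0,255] //sketch only, not the exact kernel code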
+ float NormMin; + float NormMax2; + float NormMin2; + float NormMax3; + float NormMin3; + bool outnav; //GOES output options, all default to true + bool outraw; + bool outrad; + bool outctp; + bool setNormMax; + bool setNormMin; + bool setNormMax2; + bool setNormMin2; + bool setNormMax3; + bool setNormMin3; +}; + +","Unknown" +"Nowcasting","JasonApke/OCTANE","src/oct_fileread.cc",".cc","27935","896","#include +#include +#include +#include ""image.h"" +#include ""goesread.h"" +#include ""offlags.h"" +#include ""zoom.h"" +using namespace std; +using namespace netCDF; +using namespace netCDF::exceptions; + +//Function: jma_goesread +//Purpose: This is a C++ function which reads, navigates, and calibrates GOES-R netcdf4 data +// Now with added functions to read/navigate polar orthonormal/mercator +//Requires: The netcdf C++ library +//Inputs: fpath- a full path to the netcdf file containing GOES-R Imagery +// cal- A setting for callibration, default is RAW, options include BRIT, TEMP, and RAW +// donav- set to 1 to return latitude and longitude arrays with the object, otherwise they are set to 0 +// The reason I have this as an option is for speed purposes, navigation can slow down optical flow code if it is not needed +//Returns: A structure containing the calbrated GOES data, with Latitude and Longitude files from navigation +// +//Author: Jason Apke, Updated 9/10/2018 +void oct_navcal_cuda(short *,short *, short *, short *, short *, short *, int, + int, int, int, int, int, float *, float *, + float *,string,int,float, float, float, + float, float, float, float, float, + float, float,float,float,float,float, + float,float,float,float,float,int,OFFlags); +void oct_polar_navcal_cuda(float *,short *, short *, short *, short *, short *, int, + int, int, int, int, int, float *, float *, + float *,float, float, float, + float, float, float, + float,int,int,OFFlags); +void oct_merc_navcal_cuda(float *,short *, short *, short *, short *, short *, int, + int, int, int, int, int, float *, float *, + float *,float, float, float, + float, float, + float,int,OFFlags); +void oct_bandminmax(int, float &, float &); + + +static const int NC_ERR = 2; +int oct_goesread (string fpath,string cal,int donav,int channelnum, GOESVar &resVar,OFFlags &args) +{ + //This is a function designed for reading GOES data files + using namespace std; + const double PI=3.14159265359; + const double DTOR = PI/180.; + + long nv; + long xdimsize,ydimsize; + int datf=0; + float *lat,*lon,*data3; + short *data2; + short *y; + short *x; + short *data2s; + short *ys; + short *xs; + int band; + float xScale,yScale,xOffset,yOffset,lpo; + float radScale,radOffset; + float req,rpol,pph,lam0,inverse,lat0; + float fk1,fk2,bc1,bc2,kap1; + float gipv; + string tUnitString; + NcVarAtt reqVar; + try + { + //open the file + NcFile dataFile(fpath, NcFile::read); + NcVar xVar, yVar,dataVar,gipVar,tVar,bandVar; + int xv = dataFile.getVarCount(); + multimap< string, NcDim > xxv=dataFile.getDims(); + NcDim ydim=xxv.find(""y"")->second; + NcDim xdim=xxv.find(""x"")->second; + + ydimsize=ydim.getSize(); + xdimsize=xdim.getSize(); + nv = xdimsize*ydimsize; + data2= new short[nv]; + if(!data2){ + cout << ""Memory Allocation Failed\n""; + exit(0); + } + + x = new short[xdimsize]; + if(!x){ + cout << ""Memory Allocation Failed y\n""; + exit(0); + } + y = new short[ydimsize]; + if(!y){ + cout << ""Memory Allocation Failed y\n""; + exit(0); + } + + + dataVar=dataFile.getVar(""Rad""); + yVar = dataFile.getVar(""y""); + xVar = dataFile.getVar(""x""); + tVar = 
dataFile.getVar(""t""); + bandVar = dataFile.getVar(""band_id""); + reqVar=dataVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&radScale); + if(channelnum == 1){ + resVar.nav.radScale = radScale; + } + if(channelnum == 2){ + resVar.nav.radScale2 = radScale; + } + if(channelnum == 3){ + resVar.nav.radScale3 = radScale; + } + reqVar=dataVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&radOffset); + if(channelnum == 1){ + resVar.nav.radOffset = radOffset; + } + if(channelnum == 2){ + resVar.nav.radOffset2 = radOffset; + } + if(channelnum == 3){ + resVar.nav.radOffset3 = radOffset; + } + if(channelnum == 1) + { + reqVar=yVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&yScale); + resVar.nav.yScale=yScale; + reqVar=yVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&yOffset); + resVar.nav.yOffset=yOffset; + reqVar=xVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&xScale); + resVar.nav.xScale=xScale; + reqVar=xVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&xOffset); + resVar.nav.xOffset = xOffset; + reqVar=tVar.getAtt(""units""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(tUnitString); + resVar.tUnits=tUnitString; + gipVar = dataFile.getVar(""goes_imager_projection""); + gipVar.getVar(&gipv); + resVar.nav.gipVal=gipv; + + reqVar=gipVar.getAtt(""longitude_of_projection_origin""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&lpo); + resVar.nav.lpo=lpo; + + reqVar=gipVar.getAtt(""semi_major_axis""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&req); + resVar.nav.req = req; + reqVar=gipVar.getAtt(""semi_minor_axis""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&rpol); + resVar.nav.rpol=rpol; + reqVar=gipVar.getAtt(""inverse_flattening""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&inverse); + resVar.nav.inverse_flattening=inverse; + reqVar=gipVar.getAtt(""latitude_of_projection_origin""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&lat0); + resVar.nav.lat0=lat0; + reqVar=gipVar.getAtt(""perspective_point_height""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&pph); + resVar.nav.pph=pph; + reqVar=gipVar.getAtt(""longitude_of_projection_origin""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&lam0); + lam0 = lam0*DTOR; + resVar.nav.lam0=lam0; + } + NcVar fk1Var=dataFile.getVar(""planck_fk1""); + fk1Var.getVar(&fk1); + if(fk1Var.isNull()) return NC_ERR; + if(channelnum == 1) + { + resVar.nav.fk1=fk1; + } + if(channelnum == 2) + { + resVar.nav.fk12 = fk1; + } + if(channelnum == 3) + { + resVar.nav.fk13 = fk1; + } + + NcVar fk2Var=dataFile.getVar(""planck_fk2""); + fk2Var.getVar(&fk2); + if(fk2Var.isNull()) return NC_ERR; + if(channelnum == 1) + { + resVar.nav.fk2=fk2; + } + if(channelnum == 2) + { + resVar.nav.fk22 = fk2; + } + if(channelnum == 3) + { + resVar.nav.fk23 = fk2; + } + + NcVar bc1Var=dataFile.getVar(""planck_bc1""); + bc1Var.getVar(&bc1); + if(bc1Var.isNull()) return NC_ERR; + if(channelnum == 1) + { + resVar.nav.bc1=bc1; + } + if(channelnum == 2) + { + resVar.nav.bc12 = bc1; + } + if(channelnum == 3) + { + resVar.nav.bc13 = bc1; + } + + NcVar bc2Var=dataFile.getVar(""planck_bc2""); + bc2Var.getVar(&bc2); + if(bc2Var.isNull()) return NC_ERR; + if(channelnum == 1) + { + resVar.nav.bc2=bc2; + } + if(channelnum == 2) + { + resVar.nav.bc22 = bc2; + } + 
if(channelnum == 3) + { + resVar.nav.bc23 = bc2; + } + + NcVar kap1Var=dataFile.getVar(""kappa0""); + kap1Var.getVar(&kap1); + if(kap1Var.isNull()) return NC_ERR; + if(channelnum == 1) + { + resVar.nav.kap1=kap1; + } + if(channelnum == 2) + { + resVar.nav.kap12 = kap1; + } + if(channelnum == 3) + { + resVar.nav.kap13 = kap1; + } + float H = pph+req; + float x1v, y1v, x1v2,y1v2,x1v3,y1v3,x1v4,y1v4; + //This looks like strange notation because there used to be a subsetter here + //I am now moving that out of octane + int minx,maxx,miny,maxy; + minx = 0; + maxx = xdimsize; + miny = 0; + maxy = ydimsize; + //now get them in CLAVR-x coords + + if(channelnum == 1) + { + int nc = 1; + if(args.doc2 == 1) nc++; + if(args.doc3 == 1) nc++; + + resVar.data.setdims(maxx-minx,maxy-miny,nc); + resVar.data.data = new float[(maxx-minx)*(maxy-miny)*nc]; + } else{ + data3= new float[nv]; // we need a dummy array for the zoom function + } + lat = new float[nv]; + lon = new float[nv]; + xs = new short[maxx-minx]; + ys = new short[maxy-miny]; + data2s = new short[nv]; + if(channelnum == 1) + { + resVar.nav.nx=(maxx-minx); + resVar.nav.ny=(maxy-miny); + } + if(channelnum == 2) + { + resVar.nav.nx2=(maxx-minx); + resVar.nav.ny2=(maxy-miny); + } + if(channelnum == 3) + { + resVar.nav.nx3=(maxx-minx); + resVar.nav.ny3=(maxy-miny); + } + yVar.getVar(y); + xVar.getVar(x); + if(channelnum == 1) tVar.getVar(&resVar.t); + dataVar.getVar(data2); + bandVar.getVar(&band); + if(dataVar.isNull()) return NC_ERR; + if(bandVar.isNull()) return NC_ERR; + if(yVar.isNull()) return NC_ERR; + if(xVar.isNull()) return NC_ERR; + if(channelnum ==1) + { + if(band == 2) + { + resVar.nav.minXc = minx/4; + resVar.nav.minYc = miny/4; + resVar.nav.maxXc = maxx/4; + resVar.nav.maxYc = maxy/4; + } else if(band == 1 || band == 3) + { + resVar.nav.minXc = minx/2; + resVar.nav.minYc = miny/2; + resVar.nav.maxXc = maxx/2; + resVar.nav.maxYc = maxy/2; + } else + { + resVar.nav.minXc = minx; + resVar.nav.minYc = miny; + resVar.nav.maxXc = maxx; + resVar.nav.maxYc = maxy; + } + } + resVar.nav.minX = minx; + resVar.nav.minY = miny; + resVar.nav.maxX = maxx; + resVar.nav.maxY = maxy; + float maxch, minch; + float minout = 0.; + float maxout = 255.; + oct_bandminmax(band,maxch,minch); + if(channelnum == 1) + { + if(args.setNormMax) args.NormMax = maxch; + if(args.setNormMin) args.NormMin = minch; + } + if(channelnum == 2) + { + if(args.setNormMax2) args.NormMax2 = maxch; + if(args.setNormMin2) args.NormMin2 = minch; + } + if(channelnum == 3) + { + if(args.setNormMax3) args.NormMax3 = maxch; + if(args.setNormMin3) args.NormMin3 = minch; + } + + if(channelnum > 1){ + oct_navcal_cuda(data2,data2s, x, y, xs, ys, xdimsize, + ydimsize, minx, maxx, miny, maxy, data3, lat, + lon,cal,datf,xScale,xOffset,yScale, + yOffset,radScale,radOffset,rpol,req, + H,lam0,fk1,fk2,bc1,bc2, + kap1,maxch,minch,maxout,minout,donav,args); + if(resVar.nav.nx > (maxx-minx)) + { + oct_zoom_in_float(data3, resVar.data.data,(maxx-minx),(maxy-miny),resVar.nav.nx,resVar.nav.ny,channelnum-1,1); + } else{ + double factor = (double) resVar.nav.nx / ((double) (maxx-minx)); + double factor2 = (double) resVar.nav.ny / ((double) (maxy-miny)); + if(pow(factor-factor2,2) > 0.000001) + { + printf(""Image x and y dimensions not compatable for scaling (factor not the same), exiting""); + exit(0); + } + oct_zoom_out_float(data3,resVar.data.data,(maxx-minx),(maxy-miny),factor,0,channelnum-1); + } + } else{ + + oct_navcal_cuda(data2,data2s, x, y, xs, ys, xdimsize, + ydimsize, minx, maxx, miny, maxy, 
resVar.data.data, lat, + lon,cal,datf,xScale,xOffset,yScale, + yOffset,radScale,radOffset,rpol,req, + H,lam0,fk1,fk2,bc1,bc2, + kap1,maxch,minch,maxout,minout,donav,args); + } + + + if(channelnum == 1){ + resVar.latVal = lat; + resVar.lonVal = lon; + resVar.x = xs; + resVar.y = ys; + resVar.dataSVal = data2s; + resVar.band =(int)band; + } + if(channelnum == 2){ + resVar.band2 =(int)band; + } + if(channelnum == 3){ + resVar.band3 =(int)band; + } + delete [] data2; + delete [] x; + delete [] y; + if(channelnum > 1) delete [] data3; + + }catch(NcException& e) + { + e.what(); + cout<<""OCT_GOESREAD FAILURE, CHECK THAT ALL VARIABLES AND ATTS EXIST""< xxv=dataFile.getDims(); + NcDim ydim=xxv.find(""y"")->second; + NcDim xdim=xxv.find(""x"")->second; + + ydimsize=ydim.getSize(); + xdimsize=xdim.getSize(); + nv = xdimsize*ydimsize; + data2= new float[nv]; + if(!data2){ + cout << ""Memory Allocation Failed\n""; + exit(0); + } + + x = new short[xdimsize]; + if(!x){ + cout << ""Memory Allocation Failed y\n""; + exit(0); + } + y = new short[ydimsize]; + if(!y){ + cout << ""Memory Allocation Failed y\n""; + exit(0); + } + + + dataVar=dataFile.getVar(""Rad""); + yVar = dataFile.getVar(""y""); + xVar = dataFile.getVar(""x""); + tVar = dataFile.getVar(""t""); + reqVar=yVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&yScale); + resVar.nav.yScale=yScale; + reqVar=yVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&yOffset); + resVar.nav.yOffset=yOffset; + reqVar=xVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&xScale); + resVar.nav.xScale=xScale; + reqVar=xVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&xOffset); + resVar.nav.xOffset = xOffset; + reqVar=tVar.getAtt(""units""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(tUnitString); + resVar.tUnits=tUnitString; + gipVar = dataFile.getVar(""grid_mapping""); + gipVar.getVar(&gipv); + resVar.nav.gipVal=(float) gipv; + + reqVar=gipVar.getAtt(""lat1""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&lat1); + reqVar=gipVar.getAtt(""lon0""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&lon0); + reqVar=gipVar.getAtt(""R""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&R); + + resVar.nav.lat1=lat1; + resVar.nav.lon0=lon0; + resVar.nav.R=R; + float x1v, y1v, x1v2,y1v2,x1v3,y1v3,x1v4,y1v4; + int minx, maxx, miny,maxy; + minx = 0; + maxx = xdimsize; + miny = 0; + maxy = ydimsize; + + if(channelnum == 1) + { + int nc = 1; + if(args.doc2 == 1) nc++; + if(args.doc3 == 1) nc++; + + resVar.data.setdims(maxx-minx,maxy-miny,nc); + resVar.data.data = new float[(maxx-minx)*(maxy-miny)*nc]; + } + data3int= new float[nv]; + if(args.doc2 == 1) data3int2= new float[nv]; + if(args.doc3 == 1) data3int3= new float[nv]; + lat = new float[nv]; + lon = new float[nv]; + xs = new short[maxx-minx]; + ys = new short[maxy-miny]; + data2s = new short[nv]; + if(channelnum == 1) + { + resVar.nav.nx=(maxx-minx); + resVar.nav.ny=(maxy-miny); + } + if(channelnum == 2) + { + resVar.nav.nx2=(maxx-minx); + resVar.nav.ny2=(maxy-miny); + } + if(channelnum == 3) + { + resVar.nav.nx3=(maxx-minx); + resVar.nav.ny3=(maxy-miny); + } + yVar.getVar(y); + xVar.getVar(x); + if(channelnum == 1) tVar.getVar(&resVar.t); + dataVar.getVar(data2); + if(dataVar.isNull()) return NC_ERR; + if(yVar.isNull()) return NC_ERR; + if(xVar.isNull()) return NC_ERR; + if(channelnum == 1) + { + resVar.nav.minXc = minx; + 
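//full-domain bounds again: as in oct_goesread above, subsetting was moved out of OCTANE, so min/max span the whole grid
+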
resVar.nav.minYc = miny; + resVar.nav.maxXc = maxx; + resVar.nav.maxYc = maxy; + } + resVar.nav.minX = minx; + resVar.nav.minY = miny; + oct_polar_navcal_cuda(data2,data2s, x,y,xs,ys, xdimsize, + ydimsize,minx,maxx,miny,maxy, resVar.data.data, lat, + lon,xScale,xOffset,yScale,yOffset, lon0, lat1, + R,donav,channelnum,args); + if(channelnum == 1) + { + resVar.latVal = lat; + resVar.lonVal = lon; + resVar.x = xs; + resVar.y = ys; + resVar.dataSVal = data2s; + if(args.dointerp==1) + { + resVar.dataSValfloat = data3int; + } + resVar.band =(int)band; + } + if(channelnum == 2) + { + if(args.dointerp==1) + { + resVar.dataSValfloat2 = data3int2; + } + } + if(channelnum == 3) + { + if(args.dointerp==1) + { + resVar.dataSValfloat3 = data3int3; + } + } + + delete [] data2; + delete [] x; + delete [] y; + }catch(NcException& e) + { + e.what(); + cout<<""OCT_POLARREAD FAILURE, CHECK THAT ALL VARIABLES AND ATTS EXIST""< xxv=dataFile.getDims(); + NcDim ydim=xxv.find(""y"")->second; + NcDim xdim=xxv.find(""x"")->second; + + ydimsize=ydim.getSize(); + xdimsize=xdim.getSize(); + nv = xdimsize*ydimsize; + data2= new float[nv]; + if(!data2){ + cout << ""Memory Allocation Failed\n""; + exit(1); + } + + x = new short[xdimsize]; + if(!x){ + cout << ""Memory Allocation Failed y\n""; + exit(1); + } + y = new short[ydimsize]; + if(!y){ + cout << ""Memory Allocation Failed y\n""; + exit(1); + } + dataVar=dataFile.getVar(""Rad""); + yVar = dataFile.getVar(""y""); + xVar = dataFile.getVar(""x""); + tVar = dataFile.getVar(""t""); + reqVar=yVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&yScale); + resVar.nav.yScale=yScale; + reqVar=yVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&yOffset); + resVar.nav.yOffset=yOffset; + reqVar=xVar.getAtt(""scale_factor""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&xScale); + resVar.nav.xScale=xScale; + reqVar=xVar.getAtt(""add_offset""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&xOffset); + resVar.nav.xOffset = xOffset; + reqVar=tVar.getAtt(""units""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(tUnitString); + resVar.tUnits=tUnitString; + gipVar = dataFile.getVar(""grid_mapping""); + gipVar.getVar(&gipv); + resVar.nav.gipVal=(float) gipv; + + reqVar=gipVar.getAtt(""lon1""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&lon1); + + reqVar=gipVar.getAtt(""R""); + if (reqVar.isNull()) return NC_ERR; + reqVar.getValues(&R); + + resVar.nav.lon1=lon1; + resVar.nav.R=R; + float x1v, y1v, x1v2,y1v2,x1v3,y1v3,x1v4,y1v4; + int minx, maxx, miny,maxy; + minx = 0; + maxx = xdimsize; + miny = 0; + maxy = ydimsize; + + + + resVar.data.setdims(maxx-minx,maxy-miny,1); + resVar.data.data = new float[(maxx-minx)*(maxy-miny)*1]; + lat = new float[nv]; + lon = new float[nv]; + xs = new short[maxx-minx]; + ys = new short[maxy-miny]; + data2s = new short[nv]; + resVar.nav.nx=(maxx-minx); + resVar.nav.ny=(maxy-miny); + yVar.getVar(y); + xVar.getVar(x); + tVar.getVar(&resVar.t); + dataVar.getVar(data2); + if(dataVar.isNull()) return NC_ERR; + if(yVar.isNull()) return NC_ERR; + if(xVar.isNull()) return NC_ERR; + resVar.nav.minXc = minx; + resVar.nav.minYc = miny; + resVar.nav.maxXc = maxx; + resVar.nav.maxYc = maxy; + resVar.nav.minX = minx; + resVar.nav.minY = miny; + oct_merc_navcal_cuda(data2,data2s, x,y,xs,ys, xdimsize, + ydimsize,minx,maxx,miny,maxy, resVar.data.data, lat, + lon,xScale,xOffset,yScale,yOffset, lon1, + R,donav,args); + + resVar.latVal = lat; + resVar.lonVal 
= lon; + resVar.x = xs; + resVar.y = ys; + resVar.dataSVal = data2s; + resVar.band =(int)band; + delete [] data2; + delete [] x; + delete [] y; + + }catch(NcException& e) + { + e.what(); + cout<<""OCT_MERCREAD FAILURE, CHECK THAT ALL VARIABLES AND ATTS EXIST""< xxv=dataFile.getDims(); + NcDim ydim=xxv.find(""ny"")->second; + NcDim xdim=xxv.find(""nx"")->second; + + ydimsize=ydim.getSize(); + xdimsize=xdim.getSize(); + nv = xdimsize*ydimsize; + xmax = resVar.nav.maxXc; + xmin = resVar.nav.minXc; + ymax = resVar.nav.maxYc; + ymin = resVar.nav.minYc; + + data3= new float[nv]; + if(!data3){ + cout << ""Memory Allocation Failed\n""; + exit(1); + } + + + resVar.nav.CTHx=xmax-xmin; + resVar.nav.CTHy=ymax-ymin; + dataVar=dataFile.getVar(""Cloud_Top_Height_Effective""); + dataVar.getVar(data3); + resVar.CTHVal = new float[resVar.nav.nx*resVar.nav.ny]; + if(resVar.nav.nx > (xmax-xmin)) + { + oct_zoom_in_float(data3, resVar.CTHVal,(xmax-xmin),(ymax-ymin),resVar.nav.nx,resVar.nav.ny,0,args.interpcth); + } else{ + double factor = (double) resVar.nav.nx / ((double) (xmax-xmin)); + double factor2 = (double) resVar.nav.ny / ((double) (ymax-ymin)); + if(pow(factor-factor2,2) > 0.000001) + { + printf(""Image x and y dimensions not compatable for scaling (factor not the same), CTH data problem""); + exit(0); + } + oct_zoom_out_float(data3,resVar.CTHVal,(xmax-xmin),(ymax-ymin),factor,0,0); + } + delete [] data3; + }catch(NcException& e) + { + e.what(); + cout<<""OCT_CLAVRXREAD FAILURE, CHECK THAT ALL VARIABLES AND ATTS EXIST""< xxv=dataFile.getDims(); + NcDim ydim=xxv.find(""ny"")->second; + NcDim xdim=xxv.find(""nx"")->second; + + ydimsize=ydim.getSize(); + xdimsize=xdim.getSize(); + nv = xdimsize*ydimsize; + xmax = resVar.nav.maxX; + xmin = resVar.nav.minX; + ymax = resVar.nav.maxY; + ymin = resVar.nav.minY; + + data3= new float[nv]; + if(!data3){ + cout << ""Memory Allocation Failed\n""; + exit(1); + } + data4= new float[nv]; + if(!data4){ + cout << ""Memory Allocation Failed\n""; + exit(1); + } + dataVar=dataFile.getVar(""UFG""); + dataVar.getVar(data3); + dataVar=dataFile.getVar(""VFG""); + dataVar.getVar(data4); + resVar.uPix= data3; + resVar.vPix= data4; + }catch(NcException& e) + { + e.what(); + cout<<""OCT_FGREAD FAILURE, CHECK THAT ALL VARIABLES AND ATTS EXIST""< +#include +using namespace std; +//Purpose: This is a set of utility functions to clean up the optical flow code +// dMatrix is a double 2d pointer allocation function, run free_dMatrix to free +// Same goes for float +//Author: Jason Apke, Updated 9/10/2018 + +double **dMatrix (int nRows, int nCols) +{ + double **mat; + mat = new double *[nRows]; + if(!mat) + { + cout << ""Not enough memory\n""; + exit(0); + } + + for(int mi = 0; mi < nRows; mi++){ + mat[mi] = new double [nCols]; + if(!mat[mi]) + { + cout << ""Not enough memory\n""; + exit(0); + } + } + return mat; +} +double ***dImage (int nrow, int ncol,int nchannels) +{ + double ***data; + data = new double **[nrow]; + if(!data) + { + std::cout << ""Not enough memory\n""; + exit(0); + } + for(int mi = 0; mi < nrow; mi++){ + data[mi] = new double *[ncol]; + if(!data[mi]) + { + std::cout << ""Not enough memory\n""; + exit(0); + } + for(int mj = 0; mj < ncol; mj++){ + data[mi][mj] = new double [nchannels]; + if(!data[mi][mj]) + { + std::cout << ""Not enough memory\n""; + exit(0); + } + } + } + return data; +} +float **fMatrix (int nRows, int nCols) +{ + float **mat; + mat = new float *[nRows]; + if(!mat) + { + cout << ""Not enough memory\n""; + exit(0); + } + + for(int mi = 0; mi < nRows; 
mi++){
+ mat[mi] = new float [nCols];
+ if(!mat[mi])
+ {
+ cout << ""Not enough memory\n"";
+ exit(0);
+ }
+ }
+ return mat;
+}
+void dMatrix_initzero(double **mat,int nRows, int nCols)
+{
+ for(int mi = 0; mi < nRows; mi++)
+ {
+ for(int mj = 0; mj < nCols; mj++)
+ {
+ mat[mi][mj] = 0;
+ }
+ }
+}
+void free_dMatrix (double **mat, int nRows)
+{
+ for(int mi = 0; mi < nRows; mi++)
+ delete [] mat[mi];
+ delete [] mat;
+}
+
+void free_dImage (double ***data, int nrow,int ncol)
+{
+ for(int ni = 0; ni < nrow; ni++)
+ {
+ for(int nj = 0; nj < ncol; nj++)
+ delete[] data[ni][nj];
+ delete[] data[ni];
+ }
+ delete [] data;
+}
+void free_fMatrix (float **mat, int nRows)
+{
+ for(int mi = 0; mi < nRows; mi++)
+ delete [] mat[mi];
+ delete [] mat;
+}
+
+
+","Unknown" +"Nowcasting","JasonApke/OCTANE","src/oct_bicubic.cc",".cc","4347","151","#include <cstdio> //these four header names are inferred from usage (printf, exit, std namespace); the originals were lost in extraction
+#include <cstdlib>
+#include <cmath>
+#include <iostream>
+#include ""oct_bc.h""
+using namespace std;
+//Function: oct_bicubic
+//Purpose: Does bicubic interpolation of a point in 2 dimensions
+
+static double oct_cell (
+ double v[4], //interpolation points
+ double x //point to be interpolated
+)
+{
+ return v[1] + 0.5 * x * (v[2] - v[0]
+ + x * (2.0 * v[0] - 5.0 * v[1] + 4.0 * v[2] - v[3]
+ + x * (3.0 * (v[1] - v[2]) + v[3] - v[0])));
+}
+//Bicubic interpolation (cubic for 2 dimensions)
+static double oct_bicubic_cell (
+ double p[4][4], //array containing the interpolation points
+ double x, //x position to be interpolated
+ double y //y position to be interpolated
+)
+{
+ double v[4];
+ v[0] = oct_cell(p[0], y);
+ v[1] = oct_cell(p[1], y);
+ v[2] = oct_cell(p[2], y);
+ v[3] = oct_cell(p[3], y);
+
+ return oct_cell(v, x);
+}
+
+
+double oct_bicubic(double * input, double uu, double vv, int nx, int ny,int inout)
+{
+ int sx = 1;
+ int sy = 1;
+ int x, y, mx, my, dx,dy,ddx,ddy;
+ bool bc;
+
+ //I always use reflecting boundary conditions, so this should work
+ x = oct_bc((int) uu,nx,bc);
+ y = oct_bc((int) vv,ny,bc);
+ mx = oct_bc((int) (uu-sx),nx,bc);
+ my = oct_bc((int) (vv-sy),ny,bc);
+ dx = oct_bc((int) (uu+sx),nx,bc);
+ dy = oct_bc((int) (vv+sy),ny,bc);
+ ddx = oct_bc((int) (uu+2*sx),nx,bc);
+ ddy = oct_bc((int) (vv+2*sy),ny,bc);
+ //Recent JMAMOD I got a hunch....
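+ //The two commented lines below swap in a backward (uu-2*sx, vv-2*sy) stencil for the forward one used
+ //above, apparently kept from that test; note oct_cell above is the standard Catmull-Rom cubic kernel,
+ // f(t) = v1 + 0.5*t*(v2 - v0 + t*(2*v0 - 5*v1 + 4*v2 - v3 + t*(3*(v1 - v2) + v3 - v0)))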
+ //ddx = oct_bc((int) (uu-2*sx),nx); + //ddy = oct_bc((int) (vv-2*sy),ny); + int nxtmy = nx*my; + int nxty = nx*y; + int nxtdy = nx*dy; + int nxtddy = nx*ddy; + + const double p11 = input[mx + nxtmy]; + const double p12 = input[x + nxtmy]; + const double p13 = input[dx + nxtmy]; + const double p14 = input[ddx + nxtmy]; + + const double p21 = input[mx + nxty]; + const double p22 = input[x + nxty]; + const double p23 = input[dx + nxty]; + const double p24 = input[ddx + nxty]; + + const double p31 = input[mx + nxtdy]; + const double p32 = input[x + nxtdy]; + const double p33 = input[dx + nxtdy]; + const double p34 = input[ddx + nxtdy]; + + const double p41 = input[mx + nxtddy]; + const double p42 = input[x + nxtddy]; + const double p43 = input[dx + nxtddy]; + const double p44 = input[ddx + nxtddy]; + + double pol[4][4] = { + {p11, p21, p31, p41}, + {p12, p22, p32, p42}, + {p13, p23, p33, p43}, + {p14, p24, p34, p44} + }; + double f = oct_bicubic_cell(pol,uu-x,vv-y); + if(f != f) + { + printf(""Bicubic failure, possible data issue\n""); + + exit(0); + + } + return f; + +} +double oct_bicubic_float(float * input, double uu, double vv, int nx, int ny,int inout) +{ + int sx = 1; + int sy = 1; + int x, y, mx, my, dx,dy,ddx,ddy; + bool bc; + + x = oct_bc((int) uu,nx,bc); + y = oct_bc((int) vv,ny,bc); + mx = oct_bc((int) (uu-sx),nx,bc); + my = oct_bc((int) (vv-sy),ny,bc); + dx = oct_bc((int) (uu+sx),nx,bc); + dy = oct_bc((int) (vv+sy),ny,bc); + ddx = oct_bc((int) (uu+2*sx),nx,bc); + ddy = oct_bc((int) (vv+2*sy),ny,bc); + int nxtmy = nx*my; + int nxty = nx*y; + int nxtdy = nx*dy; + int nxtddy = nx*ddy; + + const double p11 = input[mx + nxtmy]; + const double p12 = input[x + nxtmy]; + const double p13 = input[dx + nxtmy]; + const double p14 = input[ddx + nxtmy]; + + const double p21 = input[mx + nxty]; + const double p22 = input[x + nxty]; + const double p23 = input[dx + nxty]; + const double p24 = input[ddx + nxty]; + + const double p31 = input[mx + nxtdy]; + const double p32 = input[x + nxtdy]; + const double p33 = input[dx + nxtdy]; + const double p34 = input[ddx + nxtdy]; + + const double p41 = input[mx + nxtddy]; + const double p42 = input[x + nxtddy]; + const double p43 = input[dx + nxtddy]; + const double p44 = input[ddx + nxtddy]; + + double pol[4][4] = { + {p11, p21, p31, p41}, + {p12, p22, p32, p42}, + {p13, p23, p33, p43}, + {p14, p24, p34, p44} + }; + double f = oct_bicubic_cell(pol,uu-x,vv-y); + if(f != f) + { + printf(""Bicubic failure, possible data issue\n""); + exit(0); + } + return f; +} +","Unknown" +"Nowcasting","JasonApke/OCTANE","src/oct_optical_flow.cc",".cc","4058","112","#include +#include +#include +#include ""image.h"" +#include ""goesread.h"" +#include ""offlags.h"" + +using namespace std; + +//Optical flow approach options +void oct_patch_match_optical_flow (float *, float *, float *, float *, int, int,OFFlags); +void oct_variational_optical_flow (Image, Image, float *, float *,float *, int, int,int,OFFlags); + +//Post-processing functions +void oct_pix2uv_cuda(GOESVar&,double,float *,float *,short *, short *,short *, short *,OFFlags); +void oct_uv2pix(GOESVar&,float *, float *,double,OFFlags); +void oct_srsal_cu (float *, float *, float *, int, int,OFFlags); + + +//This is the optical flow code wrapper, reads in just the two GOESVar image files and the arguments +int oct_optical_flow (GOESVar &goesData,GOESVar &goesData2,OFFlags &args) +{ + int nx, ny,i,j; + long lxyz; + //Below are the short arrays to store the datasets on the output netcdf file + short *ur, 
*vr,*ur2,*vr2,*CTP; + //Below is the float-precision return from the optical flow algorithms + nx = goesData.nav.nx; + ny = goesData.nav.ny; + int nxtny = nx*ny; + // array allocations + ur = new short [nxtny]; + vr = new short [nxtny]; + ur2 = new short [nxtny]; + vr2 = new short [nxtny]; + + //Preprocessing steps here, determine the value of the first guess fed into u/vPix + if(args.dofirstguess == 0) + { + //this is otherwise delcared in firstguess read where it is filled + goesData.uPix = new float [nxtny]; //float precision pixel displacements + goesData.vPix = new float [nxtny]; + // fill it with 0s + for(int fi = 0; fi +#include +#include +#include +#include ""oct_bicubic.h"" +#include ""oct_gaussian.h"" +#include ""image.h"" +using namespace std; +//Purpose: These are a collection of zoom in/out functions designed for scaling the datasets +//to the same resolutions where needed + +void oct_zoom_size(int nx, int ny, int &nxx, int &nyy, double factor) +{ + nxx = (int)((double)nx* factor + 0.5); + nyy = (int)((double)ny* factor + 0.5); +} +void oct_zoom_out(double * image, double * imageout, int nx, int ny, double factor,int verb) +{ + //define temp image for smoothing + int inout = 2; + double *Is; + + Is = new double [nx*ny]; + for (int i = 0; i < nx*ny; i++){ + Is[i] = image[i]; + } + + int nxx, nyy; + oct_zoom_size(nx, ny, nxx,nyy,factor); + //Smooth the image first + const double sigma = 0.6*sqrt(1.0/(factor*factor)-1.0); + oct_gaussian(Is, nx, ny, sigma); + //now interpolate + for(int jj = 0; jj < nyy; jj++) + { + long nxxtjj = nxx*jj; + for(int ii = 0; ii < nxx; ii++) + { + const double i2 = (double) ii / factor; + const double j2 = (double) jj / factor; + double g = oct_bicubic(Is,i2,j2, nx, ny,inout); + if(verb == 0){ + imageout[ii+nxxtjj] = g; + }else{ + imageout[ii+nxxtjj] = g*factor; //This is for the flow, the flow needs to be scaled down + } + } + } + delete [] Is; +} +void oct_zoom_out_float(float * image, float * imageout, int nx, int ny, double factor,int verb,int cnum) +{ + int inout = 2; + double *Is; + double g; + + Is = new double [nx*ny]; + for (int i = 0; i < nx*ny; i++){ + Is[i] = image[i]; + } + if(verb == 1) exit(0); + + int nxx, nyy; + oct_zoom_size(nx, ny, nxx,nyy,factor); + long cnumt = cnum*nxx*nyy; + //Smooth the image first + const double sigma = 0.6*sqrt(1.0/(factor*factor)-1.0); + oct_gaussian(Is, nx, ny, sigma); + //now interpolate + for(int jj = 0; jj < nyy; jj++) + { + long nxxtjj = nxx*jj; + for(int ii = 0; ii < nxx; ii++) + { + const double i2 = (double) ii / factor; + const double j2 = (double) jj / factor; + if(factor < 0.999999) + { + g = oct_bicubic(Is,i2,j2, nx, ny,inout); + } else + { + g = image[ii+nxxtjj]; + } + imageout[ii+nxxtjj+cnum] = g; + } + } + delete [] Is; +} +void oct_zoom_out_2d(double * image, double ** imageout, int nx, int ny, double factor,int verb) +{ + //define temp image for smoothing + int inout = 2; + double *Is; + + Is = new double [nx*ny]; + for (int i = 0; i < nx*ny; i++){ + Is[i] = image[i]; + } + if(verb == 1) exit(0); + + int nxx, nyy; + oct_zoom_size(nx, ny, nxx,nyy,factor); + //Smooth the image first + const double sigma = 0.6*sqrt(1.0/(factor*factor)-1.0); + oct_gaussian(Is, nx, ny, sigma); + //now interpolate + for(int ii = 0; ii < nxx; ii++) + { + for(int jj = 0; jj < nyy; jj++) + { + const double i2 = (double) ii / factor; + const double j2 = (double) jj / factor; + double g = oct_bicubic(Is,i2,j2, nx, ny,inout); + imageout[ii][jj] = g; + } + } + delete [] Is; +} +//this is an image zoom out +void 
oct_zoom_out_image(double **image, Image &imageout, int nx, int ny, double factor,int verb)
+{
+ //define temp image for smoothing
+ int inout = 2;
+ double *Is;
+
+ Is = new double [nx*ny];
+ for (int c=0; c +#include <netcdf> //header name inferred from the netCDF API used below; the preceding text was lost in extraction
+#include ""image.h""
+#include ""goesread.h""
+#include ""offlags.h""
+using namespace std;
+using namespace netCDF;
+using namespace netCDF::exceptions;
+
+//Function: oct_filewrite - a function to write the output from OCTANE
+//Requires: The netcdf C++ library
+//Author: Jason Apke, Updated 2/21/2022
+static const int NC_ERR = 2;
+//There are a few functions designed for outputting certain types of files, the primary is goeswrite
+//Additions will be made for a simple image output as well -J. Apke 2/22/2022
+
+int oct_goeswrite (string fpath,GOESVar &resVar,OFFlags args)
+{
+ //This is a function designed for writing GOES data files
+ int r = 1;
+ try
+ {
+ NcFile ncf(fpath, NcFile::replace);
+ NcVar dataVar22, dataVar23,dataVar32,dataVar33,dataVar2Occlusion;
+ NcVar cnrVar,dataVarraw,dataVarraw2,dataVar,dataVar2,dataVarCTP,dataVar3;
+ NcVar fk1Var,fk2Var,bc1Var,bc2Var,kapVar;
+ NcVar fk1Var2,fk2Var2,bc1Var2,bc2Var2,kapVar2;
+ NcVar fk1Var3,fk2Var3,bc1Var3,bc2Var3,kapVar3;
+ NcDim xDim = ncf.addDim(""x"",resVar.nav.nx);
+ NcDim yDim = ncf.addDim(""y"",resVar.nav.ny);
+
+ //now create the variable
+ NcVar xVar = ncf.addVar(""x"",ncShort, xDim);
+ NcVar yVar = ncf.addVar(""y"",ncShort, yDim);
+ xVar.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.xScale);
+ xVar.putAtt(""add_offset"",NC_FLOAT,resVar.nav.xOffset);
+ yVar.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.yScale);
+ yVar.putAtt(""add_offset"",NC_FLOAT,resVar.nav.yOffset);
+
+ xVar.putVar(resVar.x);
+ yVar.putVar(resVar.y);
+
+ NcVar tVar = ncf.addVar(""t"",ncDouble);
+ tVar.putAtt(""standard_name"",""time"");
+ tVar.putAtt(""units"",resVar.tUnits);
+ tVar.putAtt(""axis"",""T"");
+ tVar.putAtt(""bounds"",""time_bounds"");
+ tVar.putAtt(""long_name"" , ""J2000 epoch mid-point between the start and end image scan in seconds"");
+ if(args.putinterp == 1){
+ tVar.putAtt(""frdt"",NC_FLOAT,resVar.frdt);
+ }
+
+ if(args.putinterp == 0)
+ {
+
+ tVar.putVar(&resVar.t);
+ } else
+ {
+ tVar.putVar(&resVar.tint);
+ }
+
+
+ vector<NcDim> dims; //element type restored; lost in extraction
+ dims.push_back(yDim);
+ dims.push_back(xDim);
+
+ if(args.outnav){
+ dataVar = ncf.addVar(""U"",ncShort,dims);
+ dataVar2 = ncf.addVar(""V"",ncShort,dims);
+ }
+ if(args.outraw){
+ dataVarraw = ncf.addVar(""U_raw"",ncShort,dims);
+ dataVarraw2 = ncf.addVar(""V_raw"",ncShort,dims);
+ }
+ if(args.docorn == 1)
+ cnrVar = ncf.addVar(""cnr"",ncShort,dims);
+ //This is to output the full float resolution pixel displacements if needed
+ //Though it should not be predicated on dosrsal...
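+ //NOTE: U/V above are packed into shorts with scale_factor 0.01 (about 0.01 m/s or 0.01 px precision),
+ //so the Upix/Vpix variables below exist to carry the full float-precision pixel displacements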
+ if(args.pixuv==1) + { + dataVar22 = ncf.addVar(""Upix"",ncFloat,dims); + dataVar23 = ncf.addVar(""Vpix"",ncFloat,dims); + } + if(args.putinterp==1) + { + //variable to store the occlusion masks + dataVar2Occlusion = ncf.addVar(""Occlusion"",ncShort,dims); + } + if(args.outctp && (args.doCTH == 1)) + { + dataVarCTP = ncf.addVar(""CTP"",ncShort,dims); + } + + if(args.outrad) + { + dataVar3 = ncf.addVar(""Rad"",ncShort,dims); + if(args.doc2 == 1) dataVar32 = ncf.addVar(""Rad2"",ncShort,dims); + if(args.doc3 == 1) dataVar33 = ncf.addVar(""Rad3"",ncShort,dims); + } + //Below are not optional outputs, including the goes projection and the optical flow settings + NcVar gipVar = ncf.addVar(""goes_imager_projection"",NC_INT); + NcVar ofVar = ncf.addVar(""optical_flow_settings"",NC_INT); + //no need for below if outrad is false + if(args.outrad) + { + + fk1Var = ncf.addVar(""planck_fk1"",NC_FLOAT); + fk2Var = ncf.addVar(""planck_fk2"",NC_FLOAT); + bc1Var = ncf.addVar(""planck_bc1"",NC_FLOAT); + bc2Var = ncf.addVar(""planck_bc2"",NC_FLOAT); + kapVar = ncf.addVar(""kappa0"",NC_FLOAT); + if(args.doc2 == 1) + { + fk1Var2 = ncf.addVar(""planck_fk1_2"",NC_FLOAT); + fk2Var2 = ncf.addVar(""planck_fk2_2"",NC_FLOAT); + bc1Var2 = ncf.addVar(""planck_bc1_2"",NC_FLOAT); + bc2Var2 = ncf.addVar(""planck_bc2_2"",NC_FLOAT); + kapVar2 = ncf.addVar(""kappa0_2"",NC_FLOAT); + } + if(args.doc3 == 1) + { + fk1Var3 = ncf.addVar(""planck_fk1_3"",NC_FLOAT); + fk2Var3 = ncf.addVar(""planck_fk2_3"",NC_FLOAT); + bc1Var3 = ncf.addVar(""planck_bc1_3"",NC_FLOAT); + bc2Var3 = ncf.addVar(""planck_bc2_3"",NC_FLOAT); + kapVar3 = ncf.addVar(""kappa0_3"",NC_FLOAT); + } + } + + if(args.outnav){ + dataVar.putAtt(""long_name"",""U""); + dataVar.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar.putAtt(""scale_factor"",NC_FLOAT,0.01); + dataVar.putAtt(""grid_mapping"",""goes_imager_projection""); + if(args.pixuv == 0) + { + dataVar.putAtt(""units"",""meters per second""); + }else{ + dataVar.putAtt(""units"",""x-pixels""); + } + dataVar2.putAtt(""long_name"",""V""); + dataVar2.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar2.putAtt(""scale_factor"",NC_FLOAT,0.01); + + dataVar2.putAtt(""grid_mapping"",""goes_imager_projection""); + if(args.pixuv == 1) + { + dataVar2.putAtt(""units"",""y-pixels""); + if(args.dosrsal == 1) + { + dataVar22.putAtt(""long_name"",""Upix""); + dataVar22.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar22.putAtt(""grid_mapping"",""goes_imager_projection""); + + dataVar23.putAtt(""long_name"",""Vpix""); + dataVar23.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar23.putAtt(""grid_mapping"",""goes_imager_projection""); + } + }else{ + dataVar2.putAtt(""units"",""meters per second""); + } + } + + + + if(args.docorn == 1) cnrVar.putAtt(""long_name"",""Corner Locations""); + if(args.outraw){ + dataVarraw.putAtt(""long_name"",""U Raw""); + dataVarraw.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVarraw.putAtt(""scale_factor"",NC_FLOAT,0.01); + + dataVarraw.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVarraw.putAtt(""units"",""x-pixels""); + + dataVarraw2.putAtt(""long_name"",""V Raw""); + dataVarraw2.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVarraw2.putAtt(""scale_factor"",NC_FLOAT,0.01); + + dataVarraw2.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVarraw2.putAtt(""units"",""y-pixels""); + } + if(args.putinterp==1) + { + dataVar2Occlusion.putAtt(""long_name"",""Occlusion Masks""); + 
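//occlusion marks pixels visible in only one of the two input frames because of motion; the key attribute below gives the encoding
+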
dataVar2Occlusion.putAtt(""key"",""0 - both, 1 - only in image 1, 2 - only in image 2""); + } + + + + if(args.outctp & (args.doCTH == 1)) + { + dataVarCTP.putAtt(""long_name"",""CTP""); + dataVarCTP.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVarCTP.putAtt(""interpcth"",NC_FLOAT,args.interpcth); //1 if cth was interpolated w/ bicubic interpolation, 0 if nearest neighbor + } + + + if(args.outrad) + { + dataVar3.putAtt(""long_name"",""Rad""); + dataVar3.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar3.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.radScale); + dataVar3.putAtt(""add_offset"",NC_FLOAT,resVar.nav.radOffset); + dataVar3.putAtt(""grid_mapping"",""goes_imager_projection""); + if(args.doc2 == 1){ + dataVar32.putAtt(""long_name"",""Rad2""); + dataVar32.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar32.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.radScale2); + dataVar32.putAtt(""add_offset"",NC_FLOAT,resVar.nav.radOffset2); + dataVar32.putAtt(""grid_mapping"",""goes_imager_projection""); + } + if(args.doc3 == 1){ + dataVar33.putAtt(""long_name"",""Rad2""); + dataVar33.putAtt(""grid_mapping"",""goes_imager_projection""); + dataVar33.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.radScale3); + dataVar33.putAtt(""add_offset"",NC_FLOAT,resVar.nav.radOffset3); + dataVar33.putAtt(""grid_mapping"",""goes_imager_projection""); + } + } + gipVar.putAtt(""long_name"" , ""GOES-R ABI fixed grid projection""); + gipVar.putAtt(""grid_mapping_name"" , ""geostationary""); + gipVar.putAtt(""perspective_point_height"",NC_DOUBLE,resVar.nav.pph); + gipVar.putAtt(""semi_major_axis"",NC_DOUBLE,resVar.nav.req); + gipVar.putAtt(""semi_minor_axis"",NC_DOUBLE,resVar.nav.rpol); + gipVar.putAtt(""inverse_flattening"",NC_DOUBLE, resVar.nav.inverse_flattening) ; + gipVar.putAtt(""latitude_of_projection_origin"",NC_DOUBLE,resVar.nav.lat0); + gipVar.putAtt(""longitude_of_projection_origin"",NC_DOUBLE,resVar.nav.lpo); + gipVar.putAtt(""sweep_angle_axis"",""x""); + + ofVar.putAtt(""long_name"" , ""Optical Flow Settings""); + ofVar.putAtt(""key"" , ""1 = Modified Zimmer et al. (2011), 2 = Farneback, 3 = Brox (2004), 4 = Least Squares""); + //An additional navigation variable to check is the nav for the other image used + //Below changes when a mesosector moves, which can create undesirable optical flow results + ofVar.putAtt(""Image2_xOffset"",NC_FLOAT,resVar.nav.g2xOffset); + ofVar.putAtt(""Image2_yOffset"",NC_FLOAT,resVar.nav.g2yOffset); + if((args.oftype==1) || (args.oftype==3)) + { + //Make sure to add ALL arguments used for reproducing modified zimmer approach -J. 
Apke 2/10/2022 + ofVar.putAtt(""lambda"",NC_DOUBLE,args.lambda); + ofVar.putAtt(""lambdac"",NC_DOUBLE,args.lambdac); //hinting term weight (0 if dofirstguess==0) + ofVar.putAtt(""alpha"", NC_DOUBLE,args.alpha); + ofVar.putAtt(""filtsigma"", NC_DOUBLE,args.filtsigma); + ofVar.putAtt(""ScaleF"", NC_DOUBLE,args.scaleF); + ofVar.putAtt(""K_Iterations"", NC_INT,args.kiters); + ofVar.putAtt(""L_Iterations"", NC_INT,args.liters); + ofVar.putAtt(""M_Iterations"", NC_INT,args.miters); + ofVar.putAtt(""CG_Iterations"", NC_INT,args.cgiters); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + ofVar.putAtt(""dofirstguess"", NC_INT,args.dofirstguess); + } + //Below is farneback which has been removed from this iteration of OCTANE + //to remove the dependencies on opencv + if(args.oftype==2) + { + ofVar.putAtt(""pyr_scale"",NC_FLOAT,args.fpyr_scale); + ofVar.putAtt(""levels"", NC_INT,args.flevels); + ofVar.putAtt(""winsize"", NC_INT,args.fwinsize); + ofVar.putAtt(""iterations"", NC_INT,args.fiterations); + ofVar.putAtt(""poly_n"", NC_INT,args.poly_n); + ofVar.putAtt(""poly_sigma"", NC_FLOAT,args.poly_sigma); + ofVar.putAtt(""use_initial_flow"", NC_INT,args.uif); + ofVar.putAtt(""farneback_gaussian"", NC_INT,args.fg); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + } + //Patch Match optical flow settings + if(args.oftype==4) + { + ofVar.putAtt(""Rad"", NC_INT,args.rad); + ofVar.putAtt(""SRad"", NC_INT,args.srad); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + } + //This is only important for image sequences, dT gives the time difference between images + ofVar.putAtt(""dt_seconds"",NC_FLOAT,resVar.dT); + + + if(args.outnav){ + dataVar.putVar(resVar.uVal); + dataVar2.putVar(resVar.vVal); + } + if(args.outraw){ + dataVarraw.putVar(resVar.uVal2); + dataVarraw2.putVar(resVar.vVal2); + } + + if(args.pixuv == 1) + { + dataVar22.putVar(resVar.uPix); + dataVar23.putVar(resVar.vPix); + } + if(args.putinterp ==1) + { + dataVar2Occlusion.putVar(resVar.occlusion); + } + + if(args.outctp && (args.doCTH == 1)) + { + dataVarCTP.putVar(resVar.CTP); + } + + if(args.outrad) + { + if(args.putinterp == 0) + { + dataVar3.putVar(resVar.dataSVal); + if(args.doc2 == 1) dataVar32.putVar(resVar.dataSVal2); + if(args.doc3 == 1) dataVar33.putVar(resVar.dataSVal3); + } else + { + dataVar3.putVar(resVar.dataSVal); + if(args.doc2 == 1) dataVar32.putVar(resVar.dataSVal2); + if(args.doc3 == 1) dataVar33.putVar(resVar.dataSVal3); + } + } + gipVar.putVar(&resVar.nav.gipVal); + if(args.outrad){ + fk1Var.putVar(&resVar.nav.fk1); + fk2Var.putVar(&resVar.nav.fk2); + bc1Var.putVar(&resVar.nav.bc1); + bc2Var.putVar(&resVar.nav.bc2); + kapVar.putVar(&resVar.nav.kap1); + if(args.doc2 == 1) + { + fk1Var2.putVar(&resVar.nav.fk12); + fk2Var2.putVar(&resVar.nav.fk22); + bc1Var2.putVar(&resVar.nav.bc12); + bc2Var2.putVar(&resVar.nav.bc22); + kapVar2.putVar(&resVar.nav.kap12); + } + if(args.doc3 == 1) + { + fk1Var3.putVar(&resVar.nav.fk13); + fk2Var3.putVar(&resVar.nav.fk23); + bc1Var3.putVar(&resVar.nav.bc13); + bc2Var3.putVar(&resVar.nav.bc23); + kapVar3.putVar(&resVar.nav.kap13); + } + } + return 0; + } + catch(NcException& e) + { + e.what(); + cout << ""GOESWRITE failure\n""; + return NC_ERR; + } +} +//writes polar orthonormal grid files +//Still a bit more to add here, raw output settings, also full float output is +//important for slow motions -J. 
Apke 2/23/2022 +int oct_polarwrite (string fpath,GOESVar &resVar,OFFlags args) +{ + int r = 1; + float *dummy; + try + { + NcFile ncf(fpath, NcFile::replace); + NcVar dataVar22, dataVar23,dataVar2Occlusion,dataVar3; + NcVar dataVar32, dataVar33; + NcDim xDim = ncf.addDim(""x"",resVar.nav.nx); + NcDim yDim = ncf.addDim(""y"",resVar.nav.ny); + + //now create the variable + NcVar xVar = ncf.addVar(""x"",ncShort, xDim); + NcVar yVar = ncf.addVar(""y"",ncShort, yDim); + xVar.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.xScale); + xVar.putAtt(""add_offset"",NC_FLOAT,resVar.nav.xOffset); + yVar.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.yScale); + yVar.putAtt(""add_offset"",NC_FLOAT,resVar.nav.yOffset); + + xVar.putVar(resVar.x); + yVar.putVar(resVar.y); + + //add the time variables + NcVar tVar = ncf.addVar(""t"",ncDouble); + tVar.putAtt(""standard_name"",""time""); + tVar.putAtt(""units"",resVar.tUnits); + tVar.putAtt(""axis"",""T""); + tVar.putAtt(""bounds"",""time_bounds""); + tVar.putAtt(""long_name"" , ""J2000 epoch mid-point between the start and end image scan in seconds""); + + if(args.dointerp == 1){ + tVar.putAtt(""frdt"",NC_FLOAT,resVar.frdt); + } + if(args.putinterp == 0) + { + tVar.putVar(&resVar.t); + } else + { + tVar.putVar(&resVar.tint); + } + + //now add the data variable + + vector dims; + dims.push_back(yDim); + dims.push_back(xDim); + NcVar dataVar = ncf.addVar(""U"",ncDouble,dims); + NcVar dataVar2 = ncf.addVar(""V"",ncDouble,dims); + if(args.pixuv==1) + { + dataVar22 = ncf.addVar(""Upix"",ncFloat,dims); + dataVar23 = ncf.addVar(""Vpix"",ncFloat,dims); + } + if(args.outrad) + { + dataVar3 = ncf.addVar(""Rad"",ncFloat,dims); + if(args.doc2 == 1) dataVar32 = ncf.addVar(""Rad2"",ncFloat,dims); + if(args.doc3 == 1) dataVar33 = ncf.addVar(""Rad3"",ncFloat,dims); + } + if(args.dointerp==1) + { + dataVar2Occlusion = ncf.addVar(""Occlusion"",ncShort,dims); + } + NcVar gipVar = ncf.addVar(""polar_imager_projection"",NC_INT); + NcVar ofVar = ncf.addVar(""optical_flow_settings"",NC_INT); + + //define atts + dataVar.putAtt(""long_name"",""U""); + dataVar.putAtt(""grid_mapping"",""polar_orthonormal""); + if(args.pixuv == 0) + { + dataVar.putAtt(""units"",""meters per second""); + }else{ + dataVar.putAtt(""units"",""x-pixels""); + } + + + dataVar2.putAtt(""long_name"",""V""); + dataVar2.putAtt(""grid_mapping"",""polar_orthonormal""); + + if(args.pixuv == 1) + { + dataVar2.putAtt(""units"",""y-pixels""); + if(args.dosrsal == 1) + { + dataVar22.putAtt(""long_name"",""Upix""); + + dataVar23.putAtt(""long_name"",""Vpix""); + } + }else{ + dataVar2.putAtt(""units"",""meters per second""); + } + if(args.dointerp==1) + { + dataVar2Occlusion.putAtt(""long_name"",""Occlusion Masks""); + dataVar2Occlusion.putAtt(""key"",""0 - both, 1 - only in image 1, 2 - only in image 2""); + } + if(args.outrad) + { + dataVar3.putAtt(""long_name"",""Rad""); + dataVar3.putAtt(""grid_mapping"",""polar_orthonormal""); + if(args.doc2 == 1) + { + dataVar32.putAtt(""long_name"",""Rad2""); + dataVar32.putAtt(""grid_mapping"",""polar_orthonormal""); + } + if(args.doc3 == 1) + { + dataVar33.putAtt(""long_name"",""Rad3""); + dataVar33.putAtt(""grid_mapping"",""polar_orthonormal""); + } + } + gipVar.putAtt(""long_name"" , ""Polar_Orthonormal_Grid""); + gipVar.putAtt(""grid_mapping_name"" , ""polar""); + double lat1val = resVar.nav.lat1; + double lon0val = resVar.nav.lon0; + double Rval = resVar.nav.R; + gipVar.putAtt(""lat1"",NC_DOUBLE,lat1val); + gipVar.putAtt(""lon0"",NC_DOUBLE,lon0val); + 
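//lat1, lon0, and R, together with the x/y scale and offset attributes above, should be enough for downstream tools to renavigate this grid
+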
gipVar.putAtt(""R"",NC_DOUBLE,Rval); + + ofVar.putAtt(""long_name"" , ""Optical Flow Settings""); + ofVar.putAtt(""key"" , ""1 = Modified Sun (2014), 2 = Farneback, 3 = Brox (2004)""); + if(args.oftype==1 || args.oftype==3) + { + ofVar.putAtt(""lambda"",NC_DOUBLE,args.lambda); + ofVar.putAtt(""lambdac"",NC_DOUBLE,args.lambdac); //hinting term weight (0 if dofirstguess==0) + ofVar.putAtt(""alpha"", NC_DOUBLE,args.alpha); + ofVar.putAtt(""filtsigma"", NC_DOUBLE,args.filtsigma); + ofVar.putAtt(""ScaleF"", NC_DOUBLE,args.scaleF); + ofVar.putAtt(""K_Iterations"", NC_INT,args.kiters); + ofVar.putAtt(""L_Iterations"", NC_INT,args.liters); + ofVar.putAtt(""M_Iterations"", NC_INT,args.miters); + ofVar.putAtt(""CG_Iterations"", NC_INT,args.cgiters); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + ofVar.putAtt(""dofirstguess"", NC_INT,args.dofirstguess); + } + if(args.oftype==2) + { + ofVar.putAtt(""pyr_scale"",NC_FLOAT,args.fpyr_scale); + ofVar.putAtt(""levels"", NC_INT,args.flevels); + ofVar.putAtt(""winsize"", NC_INT,args.fwinsize); + ofVar.putAtt(""iterations"", NC_INT,args.fiterations); + ofVar.putAtt(""poly_n"", NC_INT,args.poly_n); + ofVar.putAtt(""poly_sigma"", NC_FLOAT,args.poly_sigma); + ofVar.putAtt(""use_initial_flow"", NC_INT,args.uif); + ofVar.putAtt(""farneback_gaussian"", NC_INT,args.fg); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + } + ofVar.putAtt(""dt_seconds"",NC_FLOAT,resVar.dT); + + dataVar.putVar(resVar.uPix); + dataVar2.putVar(resVar.vPix); + if(args.pixuv == 1) + { + dataVar22.putVar(resVar.uPix); + dataVar23.putVar(resVar.vPix); + } + if(args.putinterp ==1) + { + dataVar2Occlusion.putVar(resVar.occlusion); + } + if(args.putinterp == 0) + { + long nxtny = resVar.nav.nx*resVar.nav.ny; + dummy = new float[nxtny]; + for(int dum=0; dum < (resVar.nav.nx*resVar.nav.ny); dum++) + { + dummy[dum] = resVar.data.data[dum]; + } + if(args.outrad) + { + dataVar3.putVar(dummy); + if(args.doc2 == 1) + { + for(int dum=0; dum < (resVar.nav.nx*resVar.nav.ny); dum++) + { + dummy[dum] = resVar.data.data[dum+nxtny]; + } + dataVar32.putVar(dummy); + } + if(args.doc3 == 1) + { + for(int dum=0; dum < (resVar.nav.nx*resVar.nav.ny); dum++) + { + dummy[dum] = resVar.data.data[dum+nxtny+nxtny]; + } + dataVar33.putVar(dummy); + } + } + delete [] dummy; + } else + { + dataVar3.putVar(resVar.dataSValfloat); + if(args.doc2 == 1) dataVar32.putVar(resVar.dataSValfloat2); + if(args.doc3 == 1) dataVar33.putVar(resVar.dataSValfloat3); + } + + + gipVar.putVar(&resVar.nav.gipVal); + return 0; + } + catch(NcException& e) + { + e.what(); + return NC_ERR; + } +} +//writes mercator grid netcdf files +int oct_mercwrite (string fpath,GOESVar &resVar,OFFlags args) +{ + //This is a function designed for reading GOES data files + //int r = 1; + try + { + NcFile ncf(fpath, NcFile::replace); + NcVar dataVar22, dataVar23,dataVar3; + NcDim xDim = ncf.addDim(""x"",resVar.nav.nx); + NcDim yDim = ncf.addDim(""y"",resVar.nav.ny); + + //now create the variable + NcVar xVar = ncf.addVar(""x"",ncShort, xDim); + NcVar yVar = ncf.addVar(""y"",ncShort, yDim); + xVar.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.xScale); + xVar.putAtt(""add_offset"",NC_FLOAT,resVar.nav.xOffset); + yVar.putAtt(""scale_factor"",NC_FLOAT,resVar.nav.yScale); + yVar.putAtt(""add_offset"",NC_FLOAT,resVar.nav.yOffset); + + xVar.putVar(resVar.x); + yVar.putVar(resVar.y); + + //add the time variables + NcVar tVar = ncf.addVar(""t"",ncDouble); + 
tVar.putAtt(""standard_name"",""time""); + tVar.putAtt(""units"",resVar.tUnits); + tVar.putAtt(""axis"",""T""); + tVar.putAtt(""bounds"",""time_bounds""); + tVar.putAtt(""long_name"" , ""J2000 epoch mid-point between the start and end image scan in seconds""); + + tVar.putVar(&resVar.t); + + + vector dims; + dims.push_back(yDim); + dims.push_back(xDim); + + NcVar dataVar = ncf.addVar(""U"",ncDouble,dims); + NcVar dataVar2 = ncf.addVar(""V"",ncDouble,dims); + + if(args.pixuv==1) + { + dataVar22 = ncf.addVar(""Upix"",ncFloat,dims); + dataVar23 = ncf.addVar(""Vpix"",ncFloat,dims); + } + if(args.outrad) dataVar3 = ncf.addVar(""Rad"",ncFloat,dims); + NcVar gipVar = ncf.addVar(""merc_imager_projection"",NC_INT); + NcVar ofVar = ncf.addVar(""optical_flow_settings"",NC_INT); + + dataVar.putAtt(""long_name"",""U""); + dataVar.putAtt(""grid_mapping"",""Mercator Sphere""); + dataVar.putAtt(""scale_factor"",NC_FLOAT,0.01); + if(args.pixuv == 0) + { + dataVar.putAtt(""units"",""meters per second""); + }else{ + dataVar.putAtt(""units"",""x-pixels""); + } + + + dataVar2.putAtt(""long_name"",""V""); + dataVar2.putAtt(""grid_mapping"",""Mercator Sphere""); + dataVar2.putAtt(""scale_factor"",NC_FLOAT,0.01); + + if(args.pixuv == 1) + { + dataVar2.putAtt(""units"",""y-pixels""); + if(args.dosrsal == 1) + { + dataVar22.putAtt(""long_name"",""Upix""); + dataVar23.putAtt(""long_name"",""Vpix""); + } + }else{ + dataVar2.putAtt(""units"",""meters per second""); + } + + + + + if(args.outrad) + { + dataVar3.putAtt(""long_name"",""Rad""); + dataVar3.putAtt(""grid_mapping"",""Mercator Sphere""); + } + gipVar.putAtt(""long_name"" , ""Mercator_Grid""); + gipVar.putAtt(""grid_mapping_name"" , ""Mercator""); + double lon1val = resVar.nav.lon1; + double Rval = resVar.nav.R; + gipVar.putAtt(""lon1"",NC_DOUBLE,lon1val); + gipVar.putAtt(""R"",NC_DOUBLE,Rval); + + ofVar.putAtt(""long_name"" , ""Optical Flow Settings""); + ofVar.putAtt(""key"" , ""1 = Modified Sun (2014), 2 = Farneback, 3 = Brox (2004)""); + if(args.oftype==1) + { + ofVar.putAtt(""lambda"",NC_DOUBLE,args.lambda); + ofVar.putAtt(""lambdac"",NC_DOUBLE,args.lambdac); //hinting term weight (0 if dofirstguess==0) + ofVar.putAtt(""alpha"", NC_DOUBLE,args.alpha); + ofVar.putAtt(""filtsigma"", NC_DOUBLE,args.filtsigma); + ofVar.putAtt(""ScaleF"", NC_DOUBLE,args.scaleF); + ofVar.putAtt(""K_Iterations"", NC_INT,args.kiters); + ofVar.putAtt(""L_Iterations"", NC_INT,args.liters); + ofVar.putAtt(""M_Iterations"", NC_INT,args.miters); + ofVar.putAtt(""CG_Iterations"", NC_INT,args.cgiters); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + ofVar.putAtt(""dofirstguess"", NC_INT,args.dofirstguess); + } + if(args.oftype==2) + { + ofVar.putAtt(""pyr_scale"",NC_FLOAT,args.fpyr_scale); + ofVar.putAtt(""levels"", NC_INT,args.flevels); + ofVar.putAtt(""winsize"", NC_INT,args.fwinsize); + ofVar.putAtt(""iterations"", NC_INT,args.fiterations); + ofVar.putAtt(""poly_n"", NC_INT,args.poly_n); + ofVar.putAtt(""poly_sigma"", NC_FLOAT,args.poly_sigma); + ofVar.putAtt(""use_initial_flow"", NC_INT,args.uif); + ofVar.putAtt(""farneback_gaussian"", NC_INT,args.fg); + ofVar.putAtt(""NormMax"", NC_FLOAT,args.NormMax); + ofVar.putAtt(""NormMin"", NC_FLOAT,args.NormMin); + } + ofVar.putAtt(""dt_seconds"",NC_FLOAT,resVar.dT); + + dataVar.putVar(resVar.uVal); + dataVar2.putVar(resVar.vVal); + if(args.pixuv == 1) + { + dataVar22.putVar(resVar.uPix); + dataVar23.putVar(resVar.vPix); + } + if(args.outrad) dataVar3.putVar(resVar.data.data); + 
+        gipVar.putVar(&resVar.nav.gipVal);
+        return 0;
+    }
+    catch(NcException& e)
+    {
+        e.what();
+        return NC_ERR;
+    }
+}
+
+
+int oct_filewrite (string fpath,string ftype, GOESVar &resVar,OFFlags args)
+{
+    int t = 0;
+    if(ftype==""GOES"") t = oct_goeswrite (fpath,resVar,args);
+    if(ftype==""POLAR"") t = oct_polarwrite (fpath,resVar,args);
+    if(ftype==""MERC"") t = oct_mercwrite (fpath,resVar,args);
+
+    return t;
+}
+","Unknown"
+"Nowcasting","JasonApke/OCTANE","src/oct_interp.cc",".cc","18846","485","#include
+#include
+#include
+#include ""image.h""
+#include ""goesread.h""
+#include ""util.h""
+#include ""offlags.h""
+#include ""oct_bc.h""
+using namespace std;
+double oct_binterp(double,double,double,double,double,double,double,double,double,double);
+
+//A simple optical flow interpolation function following the Baker et al. (2011) approach
+//Citation:
+//Baker, S., Scharstein, D., Lewis, J.P. et al. A Database and Evaluation Methodology for Optical Flow.
+//Int J Comput Vis 92, 1–31 (2011). https://doi.org/10.1007/s11263-010-0390-2
+//Still under development, use with caution J. Apke 2/23/2022
+void oct_warpflow(float *u1, float *v1, float *sosarr, float *im1, float *im2,float time,long &holecount, int nx, int ny, float *ut, float *vt)
+{
+    bool bc, bc2,bc3,bc4;
+    long nxtny = nx*ny;
+
+    for (int j = 0; j < ny; j++)
+    {
+        long nxtj = nx*j;
+        for(int i = 0; i < nx; i++)
+        {
+            long lxyz = i+nxtj;
+            //use u to seek u
+            int iv = (int) oct_bc((double)(round(i+time*u1[lxyz])),nx-1,bc);
+            int jv = (int) oct_bc((double)(round(j+time*v1[lxyz])),ny-1,bc2);
+            int iv2 = (int) oct_bc((double)(round(i+u1[lxyz])),nx-1,bc3);
+            int jv2 = (int) oct_bc((double)(round(j+v1[lxyz])),ny-1,bc4);
+
+            for(int l = 0; l < 2; l++)
+            {
+                int posj = jv+l;
+                int posj2 = jv2 + l;
+                long nxtposj = nx*posj;
+                long nxtposj2 = nx*posj2;
+                for(int k = 0; k < 2; k++)
+                {
+                    int posi = (int) iv+k;
+                    long lxyz2 = posi+nxtposj; //lxyz at posi posj, or iv+k, jv+l
+                    long lxyz3 = iv2+k + nxtposj2; //lxyz at posi2, posj2
+
+                    double imgdiff = (im1[lxyz]-im2[lxyz3]);
+                    double imgdiff2 = imgdiff*imgdiff;
+                    if((ut[lxyz2] < -998) || (sosarr[lxyz2] > imgdiff2))
+                    {
+                        if(ut[lxyz2] < -998) holecount -= 1; //reduce the hole count when a point is filled
+                        ut[lxyz2] = u1[lxyz];
+                        vt[lxyz2] = v1[lxyz];
+                        sosarr[lxyz2] = imgdiff2;
+                        //Here is where you splat if needed
+                    }
+
+                }
+            }
+        }
+    }
+}
+int oct_interp (GOESVar &geo1,GOESVar &geo2, float fr, OFFlags args)
+{
+    int nx = geo1.nav.nx;
+    int ny = geo1.nav.ny;
+    int ival,jval;
+    bool bc,bc3,bc4;
+    float *ut, *vt, *ut2,*vt2,*sosarr,*sosarr2;
+    double imgnew,imgnew2,imgnew3;
+    short * occ, *o1a, *o0a;
+    long nxtny = nx*ny;
+    occ = new short [nxtny];
+
+    //note, these are 1d arrays indexed as i + nx*j; 2d arrays are slower in C++
+    ut = new float [nxtny];
+    vt = new float [nxtny];
+    ut2 = new float [nxtny];
+    vt2 = new float [nxtny];
+    sosarr = new float [nxtny];
+    sosarr2 = new float [nxtny];
+    o1a = new short [nxtny];
+    o0a = new short [nxtny];
+
+
+    long holecount = nx*ny, holecount2 = nx*ny;
+    for (int j = 0; j < ny; j++)
+    {
+        long nxtj = nx*j;
+        for(int i = 0; i < nx; i++)
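+        //The arrays initialized below use -999. as a sentinel for unfilled (hole)
+        //pixels and 999999. as a worst-case color-constancy residual. oct_warpflow
+        //above then splats each source pixel's flow onto the 2x2 neighborhood of
+        //its destination; when two source pixels land on the same target, the
+        //smaller squared residual (I0(x) - I1(x+u))^2 wins. Illustrative
+        //pseudocode of that rule:
+        //  if target is unfilled, or new residual < stored residual:
+        //      keep this pixel's (u,v) and store the new residual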
+        {
+            long lxyz = i+nxtj;
+            ut[lxyz] = -999.;
+            vt[lxyz] = -999.;
+
+            ut2[lxyz] = -999.;
+            vt2[lxyz] = -999.;
+            sosarr[lxyz] = 999999.;
+            sosarr2[lxyz] = 999999.;
+        }
+    }
+
+    float frinv = fr;
+    geo1.frdt = (float) frinv;
+    geo1.tint = geo1.t+(double) geo1.dT*fr;
+    float time = frinv;
+    //The color constancy test only uses channel 1 right now, it will soon involve channels 2/3 as well
+    oct_warpflow(geo1.uPix, geo1.vPix, sosarr,geo1.data.data,geo2.data.data, time,holecount, nx, ny, ut, vt); //warps the flow forward to the interpolation time
+    //Now that ut and vt have their initial fill, we need to fill in holes
+    //I will use an outside-in filling strategy, with forward and backward looping
+    int rev = 0;
+    while(holecount > 0)
+    {
+        for (int j = 0; j < ny; j++)
+        {
+            if(rev == 0) {
+                jval = j;
+            } else
+            {
+                jval = ny-1-j;
+            }
+            int kmax = nx+nx;
+            int kmin = -nx;
+            if(jval == 0) kmin = 0;
+            if(jval == ny-1) kmax = nx;
+            long nxtjval = nx*jval;
+            for(int i = 0; i < nx; i++)
+            {
+                if(rev == 0){
+                    ival = i;
+                } else
+                {
+                    ival = nx-1-i;
+                }
+                //neighbor bounds follow the (possibly reversed) sweep coordinates
+                int lmin = -1;
+                int lmax = 2;
+                if(ival == 0) lmin = 0;
+                if(ival == nx-1) lmax = 1;
+                long lxyzval = ival + nxtjval;
+                if(ut[lxyzval] < -998)
+                {
+                    //do an average to fill the hole
+                    double num1 = 0;
+                    double sum1 = 0;
+                    double sum2 = 0;
+                    for(int k = kmin; k < kmax; k=k+nx)
+                    {
+                        for(int l = lmin; l < lmax; l++)
+                        {
+                            long lxyzval1 = lxyzval + k + l;
+                            if(ut[lxyzval1] > -998)
+                            {
+                                sum1 += ut[lxyzval1];
+                                sum2 += vt[lxyzval1];
+                                num1 += 1;
+                            }
+                        }
+
+                    }
+
+                    if(num1 > 0)
+                    {
+                        ut[lxyzval] = sum1/num1;
+                        vt[lxyzval] = sum2/num1;
+                        holecount -= 1;
+                    }
+                }
+            }
+        } //end i j for loops
+        if(rev == 0){
+            rev = 1;
+        } else
+        {
+            rev = 0;
+        }
+    } //end while
+    //holes will be filled now
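+    //Recap of the fill strategy above: each pass averages any already-filled
+    //8-neighbors into an empty pixel, and the sweep direction alternates between
+    //passes so fills grow inward from both image corners rather than streaking
+    //one way. A 1-D sketch of a single forward pass (-999 marks holes):
+    //  before: [3, -999, -999, 5]  ->  after: [3, 3, 4, 5]
+    //(the first hole takes its filled neighbor 3; the second then averages 3 and 5)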
+    //Now it is time for occlusion reasoning, again following Baker 2011
+    //now check to ensure we have flow consistency
+    oct_warpflow(geo1.uPix, geo1.vPix, sosarr2,geo1.data.data,geo2.data.data, 1.,holecount2, nx, ny, ut2, vt2); //warps the flow all the way to image 2
+    //occlusion masks are set here
+    for(int j = 0; j < ny; j++)
+    {
+        long nxtj = nx*j;
+        for(int i = 0; i < nx; i++)
+        {
+            long lxyz = i+nxtj;
+            o1a[lxyz] = 0;
+            o0a[lxyz] = 0;
+            if(ut2[lxyz] < -998)
+            {
+                o1a[lxyz] = 1;
+            } else
+            {
+                int iv = (int) oct_bc((double)(round(i+geo1.uPix[lxyz])),nx-1,bc3);
+                int jv = (int) oct_bc((double)(round(j+geo1.vPix[lxyz])),ny-1,bc4);
+                long lxyz2 = iv+jv*nx;
+
+                double sqrval1 = geo1.uPix[lxyz] - ut2[lxyz2];
+                double sqrval2 = geo1.vPix[lxyz] - vt2[lxyz2];
+                if(sqrval1*sqrval1+sqrval2*sqrval2 > 0.25) //note 0.5^2 = 0.25
+                {
+                    o0a[lxyz] = 1;
+                }
+            }
+
+        }
+    }
+
+    for (int j = 0; j < ny; j++)
+    {
+        long nxtj = j*nx;
+        for(int i = 0; i < nx; i++)
+        {
+            long lxyz = i + nxtj;
+            //occlusion masks were set in the o0a/o1a loop above; note, this is still under development, masks may be bugged -J. Apke 2/24/2022
+            //We now have enough information to interpolate, let's do it! 
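+            //Interpolation rule (Baker et al. 2011): with the time-t flow (ut,vt),
+            //sample both frames along the motion path and blend linearly in time:
+            //  I_t(x) = (1 - t)*I0(x - t*(u,v)) + t*I1(x + (1 - t)*(u,v))
+            //If one side is occluded (o0/o1 masks above), only the visible frame
+            //is sampled. x00/y00 and x10/y10 below are the two boundary-checked
+            //sample locations in image 1 and image 2.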
+ double x00 = oct_bc((double)(i-time*ut[lxyz]),nx-1,bc); + double y00 = oct_bc((double)(j-time*vt[lxyz]),ny-1,bc); + double x10 = oct_bc((double)(i+(1-time)*ut[lxyz]),nx-1,bc); + double y10 = oct_bc((double)(j+(1-time)*vt[lxyz]),ny-1,bc); + int x0i = (int) (x00+0.5); + int y0i = (int) (y00+0.5); //location of nearest pix at x0, y0 + long lxyz00 = x0i + nx*y0i; + int x1i = (int) (x10+0.5); + int y1i = (int) (y10+0.5); //location of nearest pix at x1, y1 + long lxyz11 = x1i + nx*y1i; + + double x1 = (double) ((int) x00); + double x2 = x1+1; + double y1 = (double) ((int) y00); + double y2 = y1+1; + long lxyz1 = ((int) x1) + nx*((int) y1); + long lxyz2 = ((int) x2) + nx*((int) y1); + long lxyz3 = ((int) x1) + nx*((int) y2); + long lxyz4 = ((int) x2) + nx*((int) y2); + double f11 = geo1.data.data[lxyz1]; //im1[(int) x1][(int) y1]; + double f21 = geo1.data.data[lxyz2]; //im1[(int) x2][(int) y1]; + double f12 = geo1.data.data[lxyz3]; //im1[(int) x1][(int) y2]; + double f22 = geo1.data.data[lxyz4]; //im1[(int) x2][(int) y2]; + //bilinear interpolation of boundary condition corrected points + double I0X0=oct_binterp (x00, y00,x1, x2, y1, y2, f11, f21, f12, f22); + double I0X02, I0X03; + if(args.doc2 == 1) + { + f11 = geo1.data.data[lxyz1+nxtny]; //im12[(int) x1][(int) y1]; + f21 = geo1.data.data[lxyz2+nxtny]; //im12[(int) x2][(int) y1]; + f12 = geo1.data.data[lxyz3+nxtny]; //im12[(int) x1][(int) y2]; + f22 = geo1.data.data[lxyz4+nxtny]; //im12[(int) x2][(int) y2]; + I0X02=oct_binterp (x00, y00,x1, x2, y1, y2, f11, f21, f12, f22); + } + if(args.doc3 == 1) + { + f11 = geo1.data.data[lxyz1+nxtny+nxtny]; //im13[(int) x1][(int) y1]; + f21 = geo1.data.data[lxyz2+nxtny+nxtny]; //im13[(int) x2][(int) y1]; + f12 = geo1.data.data[lxyz3+nxtny+nxtny]; //im13[(int) x1][(int) y2]; + f22 = geo1.data.data[lxyz4+nxtny+nxtny]; //im13[(int) x2][(int) y2]; + I0X03=oct_binterp (x00, y00,x1, x2, y1, y2, f11, f21, f12, f22); + } + + + x1 = (double) ((int) x10); + x2 = x1+1; + y1 = (double) ((int) y10); + y2 = y1+1; + lxyz1 = ((int) x1) + nx*((int) y1); + lxyz2 = ((int) x2) + nx*((int) y1); + lxyz3 = ((int) x1) + nx*((int) y2); + lxyz4 = ((int) x2) + nx*((int) y2); + f11 = geo2.data.data[lxyz1]; //im1[(int) x1][(int) y1]; + f21 = geo2.data.data[lxyz2]; //im1[(int) x2][(int) y1]; + f12 = geo2.data.data[lxyz3]; //im1[(int) x1][(int) y2]; + f22 = geo2.data.data[lxyz4]; //im1[(int) x2][(int) y2]; + + + double I1X1=oct_binterp (x10, y10,x1, x2, y1, y2, f11, f21, f12, f22); + double I1X12, I1X13; + if(args.doc2 == 1) + { + f11 = geo2.data.data[lxyz1+nxtny]; //im22[(int) x1][(int) y1]; + f21 = geo2.data.data[lxyz2+nxtny]; //im22[(int) x2][(int) y1]; + f12 = geo2.data.data[lxyz3+nxtny]; //im22[(int) x1][(int) y2]; + f22 = geo2.data.data[lxyz4+nxtny]; //im22[(int) x2][(int) y2]; + I1X12=oct_binterp (x10, y10,x1, x2, y1, y2, f11, f21, f12, f22); + } + if(args.doc3 == 1) + { + f11 = geo2.data.data[lxyz1+nxtny+nxtny]; //im23[(int) x1][(int) y1]; + f21 = geo2.data.data[lxyz2+nxtny+nxtny]; //im23[(int) x2][(int) y1]; + f12 = geo2.data.data[lxyz3+nxtny+nxtny]; //im23[(int) x1][(int) y2]; + f22 = geo2.data.data[lxyz4+nxtny+nxtny]; //im23[(int) x2][(int) y2]; + I1X13=oct_binterp (x10, y10,x1, x2, y1, y2, f11, f21, f12, f22); + } + + + //Uncomment below to turn off occlusion masks + //o0 = 0; o1 = 0; + short o0 = o0a[lxyz00]; + short o1 = o1a[lxyz11]; + occ[lxyz] = 0; + + if((o0 == 0) && (o1 == 0)) + { + imgnew = (1.-time)*I0X0 + time*(I1X1); + if(args.doc2 == 1) + { + imgnew2 = (1.-time)*I0X02 + time*(I1X12); + } + if(args.doc3 == 1) + { 
+                    imgnew3 = (1.-time)*I0X03 + time*(I1X13);
+                }
+            } else if(o1 == 1)
+            {
+                occ[lxyz] = 2;
+                imgnew = I0X0;
+                if(args.doc2 == 1) imgnew2 = I0X02;
+                if(args.doc3 == 1) imgnew3 = I0X03;
+            } else{
+                imgnew = I1X1;
+                occ[lxyz] = 1;
+                if(args.doc2 == 1) imgnew2 = I1X12;
+                if(args.doc3 == 1) imgnew3 = I1X13;
+            }
+
+            //set the dataSVal now to the interpolated value
+            if(args.dopolar == 0)
+            {
+                //Scale the image back to native values
+                //Note this currently assumes minout and maxout from fileread are 0 - 255, must add to args
+                float imgscale = (imgnew/255.) * (args.NormMax-args.NormMin)+args.NormMin;
+                geo1.dataSVal[lxyz] = (short)((imgscale - geo1.nav.radOffset)/(geo1.nav.radScale));
+                if(args.doc2 == 1)
+                {
+                    imgscale = (imgnew2/255.) * (args.NormMax2-args.NormMin2)+args.NormMin2;
+                    geo1.dataSVal2[lxyz] = (short) ((imgscale - geo1.nav.radOffset2)/(geo1.nav.radScale2));
+                }
+                if(args.doc3 == 1)
+                {
+                    imgscale = (imgnew3/255.) * (args.NormMax3-args.NormMin3)+args.NormMin3;
+                    geo1.dataSVal3[lxyz] = (short) ((imgscale - geo1.nav.radOffset3)/(geo1.nav.radScale3));
+                }
+            } else {
+                float imgscale = (imgnew/255.) * (args.NormMax-args.NormMin)+args.NormMin;
+                geo1.dataSValfloat[lxyz] = (float) imgscale;
+                if(args.doc2 == 1)
+                {
+                    imgscale = (imgnew2/255.) * (args.NormMax2-args.NormMin2)+args.NormMin2;
+                    geo1.dataSValfloat2[lxyz] = (float) imgscale;
+                }
+                if(args.doc3 == 1)
+                {
+                    imgscale = (imgnew3/255.) * (args.NormMax3-args.NormMin3)+args.NormMin3;
+                    geo1.dataSValfloat3[lxyz] = (float) imgscale;
+                }
+
+            }
+        }
+    }
+
+    delete [] ut;
+    delete [] vt;
+    delete [] ut2;
+    delete [] vt2;
+    delete [] sosarr;
+    delete [] sosarr2;
+    delete [] o1a;
+    delete [] o0a;
+    geo1.occlusion = occ;
+
+    return 1;
+}
+","Unknown"
+"Nowcasting","JasonApke/OCTANE","src/main.cc",".cc","19039","485","// File Name: main.cc
+// Purpose: Wrapper for OCTANE functions, reads in command line args and launches optical flow code
+// Inputs: Command Line Arguments (for a list, simply execute octane with no arguments)
+// Outputs: Output is a netcdf file called ""outfile.nc"", placed in the output directory (default is the working directory)
+// Author: Jason Apke
+// Contact: jason.apke@colostate.edu
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include ""image.h""
+#include ""goesread.h""
+#include ""offlags.h""
+using namespace std;
+
+int oct_fileread(string,string,string,int,int,GOESVar &,OFFlags &);
+int oct_filewrite(string,string,GOESVar &,OFFlags);
+int oct_interp (GOESVar &,GOESVar &, float, OFFlags);
+int oct_optical_flow(GOESVar &, GOESVar &,OFFlags &);
+
+
+
+int main(int argc, char *argv[])
+{
+    //A few integers to check if things run smoothly
+    int z;
+    int t;
+    OFFlags args; //a structure holding pertinent arguments
+    int t3;
+    string interpoutloc;
+    //File name strings
+    string f1, f2;
+    string fc21, fc22;
+    string fc31, fc32;
+    string f1c, f2c;
+    string f1fg;
+    string interploc,outdir;
+    //Argument definitions, used to be in order, though I removed a few with time
+    string arg1=""-i1"",arg2=""-i2"",arg4=""-i1cth"",arg5=""-i2cth"",arg7=""-farn"",arg8=""-pd"",arg9=""-srsal"";
+    string arg10=""-Polar"",arg11=""-Merc"",arg12=""-ahi"",arg13=""-ir"",arg14=""-sosm"",arg15=""-interp"";
+    string 
arg16=""-ic21"",arg17=""-ic22""; + string arg19=""-ic31"",arg20=""-ic32""; + string arg22=""-alpha"",arg23=""-lambda"",arg24=""-scsig"",arg25=""-alpha2"",arg26=""-lambdac""; + string arg27=""-fwinsize"",arg28=""-polyn"",arg29=""-nncth"",arg30=""-inv"",arg31=""-ctt"",arg32=""-kiters"",arg33=""-brox"",arg34=""-corn""; + string arg35=""-firstguess"",arg36=""-rad"",arg37=""-srad"",arg38=""-liters"",arg39=""-deltat"",arg40=""-interploc""; + string arg41=""-no_outnav"", arg42=""-no_outraw"", arg43=""-no_outrad"",arg44=""-no_outctp"",arg45=""-set_device""; + string arg46=""-normmax"", arg47=""-normmin"", arg48=""-normmax2"", arg49=""-normmin2"", arg50=""-normmax3"", arg51=""-normmin3"",arg52=""-o""; + GOESVar goesData,goesData2; //,goesData3; + + args.farn =0; //farneback set off, use -farn to turn on + args.pixuv = 0; //output set to wind, use -pd to set to pixel displacement instead + args.dosrsal= 0; //output not smoothed by default for SRSAL, set -srsal to perform this + args.dopolar= 0; //input has a polar orthonormal grid + args.domerc= 0; //input has a mercator grid + args.ftype=""GOES""; + //Farneback defaults, eventually I will move these out of code so I don't have to keep updating git to change them + args.fpyr_scale=0.5; + args.flevels=2; + args.fwinsize=20; + args.fiterations=5; + args.poly_n=10; + args.poly_sigma=0.5; + args.uif = 0; + args.fg = 1; + args.dofirstguess = 0; + args.ir = 0; + args.dososm = 0; + args.dointerp=0; + interploc=""./interpolation""; + outdir = ""./""; + args.docorn = 0; + args.rad = 2; + args.srad = 2; + args.lambda=1.; //I found this too large, it introduced noise every once in a while, especially on boundary pixels + args.alpha=5.; + //I use this below when I have no median smoothing + args.filtsigma=3.; //deprecated + args.scaleF=0.5; + args.kiters=4; + args.alpha2=20.; //relevant for div-curl expansion, not for what we do yet + args.lambdac=0.; + args.liters=3; //3; + args.cgiters=30; + args.miters=5; + args.scsig=400.; //deprecated + args.interpcth = 1; + args.deltat = 60.; + args.doc2 = 0; + args.doahi = 0; + args.doc3 = 0; + args.doinv = 0; + args.doctt = 0; //deprecated + args.doCTH = 0; + args.dozim = 1; + args.outraw= true; + args.outctp= true; + args.outrad= true; + args.outnav= true; + args.setdevice = 0; + args.setNormMax=true; + args.setNormMin=true; + args.setNormMax2=true; + args.setNormMin2=true; + args.setNormMax3=true; + args.setNormMin3=true; + ////////////////////////////////End setting the defaults + + cout<< ""Beginning variational dense optical flow..."" << endl; + if(argc < 4) + { + cout << ""Optical Flow Toolkit for Atmospheric aNd Earth sciences (OCTANE)\n""; + cout << ""Author: Jason Apke\n""; + cout << ""Contact: jason.apke@colostate.edu\n""; + cout << ""input flags:\n\n""; + cout << ""-i1 , -i2 are the GOES-R file netcdf full paths, i1 is the first image, i2 is the second\n\n""; + cout << ""-i1cth , -i2cth are optional paths to cloud top height netcdfs \n\n""; + cout << ""-nncth instead of default bilinear interpolation, remap the CTH grids with nearest neighbor \n\n""; + cout << ""-o writes the file to the designated directory, include slash at the end (default is ./) \n\n""; + cout << ""-pd forces OCTANE to return unnavigated pixel displacements \n\n""; + cout << ""-srsal has OCTANE return bilinearly smoothed optical flow output (useful for computing cloud-top divergence) \n\n""; + cout << ""-Polar use this flag to ingest polar-orthonormal grid images instead of GOES projections (used for sea-ice tracking)\n\n""; + cout << 
""-Merc use this flag to injest mercator grid images instead of GOES projections (definition) \n\n""; + cout << ""-ahi (deprecated) use when reading netcdfs converted from AHI binaries \n\n""; + cout << ""-ir use this flag to output ir temperatures instead of cloud-top height (changes the scaling of the short variable ctp)\n\n""; + cout << ""-sosm use this flag to perform least-squares minimization or patch-match tracking instead of zimmer optical flow \n\n""; + cout << ""-rad set the target radius (in x- and y-pixels) for sosm tracking \n\n""; + cout << ""-srad set the search radius (in x- and y-pixels) for sosm tracking \n\n""; + cout << ""-interp use this flag to perform optical flow image interpolation (temporal for image sequences) \n\n""; + cout << ""-normmin(2|3) sets the image brighness minimum for normalization (defaults for each band from GOES determined in OCT bandminmax) \n\n""; + cout << ""-normmax(2|3) sets the image brighness maxiumum for normalization (defaults for each band from GOES determined in OCT bandminmax) \n\n""; + cout << ""-deltat use this to set the framerate for optical flow interpolation (in seconds) \n\n""; + cout << ""-interpout use this to set the output for the interp directory \n\n""; + //Need to add flags for interpolation -J. Apke 2/10/2022 + cout << ""-ic21 -ic22 are flags to input another channel (ch2) for files 1 and 2 (useful for RGB tracking)\n\n""; + cout << ""-ic31 -ic32 are flags to input another channel (ch3) for files 1 and 2 (useful for RGB tracking)\n\n""; + cout << ""-alpha is a flag to set the smoothness constraint constant for Brox/Zimmer-based approaches \n\n""; + cout << ""-lambda is a flag to set the gradient constraint constant for Brox/Zimmer-based approaches \n\n""; + cout << ""-lambdac this is to set the weight of a hinting term, only used when -firstguess is active \n\n""; + cout << ""-kiters number of outer iterations/pyramid levels in Brox/Zimmer-based approaches, default is 4 \n\n""; + cout << ""-liters number of inner iterations/pyramid levels in Brox/Zimmer-based approaches, default is 3 (use more if OF struggles to converge)\n\n""; + cout << ""-cgiters maximum number of preconditioned conjugate gradient iterations in Brox/Zimmer-based approaches, default is 30 (use more if OF struggles to converge)\n\n""; + cout << ""-scsig (deprecated) this was the sigma value for experimental smoothness constraint robust functions which are no longer used \n\n""; + cout << ""-alpha2 (deprecated) This is a smoothness constraint constant term for a div-curl approach which is not yet added (but planned) within the Conjugate Gradient solver \n\n""; + cout << ""Farneback Definitions (All Deprecated) \n\n""; + cout << ""-fwinsize -polyn both are inputs to Farneback opencv algorithm (see documentation here https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html \n\n""; + cout << ""-inv (deprecated) flag to pass through inversion check from clavrx files \n\n""; + cout << ""-ctt (deprecated) flag to pass through estimated temperatures from clavrx files \n\n""; + cout << ""-brox set to perform pure Brox approach (default is modified zimmer), this flag turns off gradient (and lapacian) scaling of brightness (and gradient) constraints \n\n""; + cout << ""-corn set to output a Shi/Tomasi 1996 corner detection algorithm, and output corner locations \n\n""; + cout << ""-firstguess set to input a first guess file (Only for GOES files, motions must be navigated) \n\n""; + cout << ""-no_outnav turns off output of navigated u/v optical flow 
motions\n\n""; + cout << ""-no_outraw turns off output of raw (pixel) u/v optical flow displacements\n\n""; + cout << ""-no_outrad turns off output of imagery used to derive optical flow\n\n""; + cout << ""-no_outctp turns off output of cloud-top height (or infrared) data used with the imagery\n\n""; + cout << ""-set_device sets which gpu to run on, 1-based (default is 1), must be less than # of gpus \n\n""; + cout << ""Below is an example running octane on a sequence of GOES image files: \n\n""; + + cout << ""./octane \n""; + + return 0; + } + //may want to switch these int switches to boolean flags -J. Apke 2/10/2022 + for (int i = 0; i < argc; ++i) + { + if(!arg1.compare(argv[i])) + f1=argv[i+1]; + if(!arg2.compare(argv[i])) + f2=argv[i+1]; + if(!arg4.compare(argv[i])) + { + f1c=argv[i+1]; + args.doCTH = 1; + } + if(!arg5.compare(argv[i])) + f2c=argv[i+1]; + if(!arg7.compare(argv[i])) + { + args.farn=1; + printf(""Farneback disabled for this version of OCTANE, run without -farn, exiting...""); + exit(0); + } + if(!arg8.compare(argv[i])) + args.pixuv=1; + if(!arg9.compare(argv[i])) + args.dosrsal=1; + if(!arg10.compare(argv[i])) + { + args.dopolar=1; + args.ftype=""POLAR""; + } + if(!arg11.compare(argv[i])) + { + args.domerc=1; + args.ftype=""MERC""; + } + if(!arg12.compare(argv[i])) + args.doahi=1; + if(!arg13.compare(argv[i])) + args.ir=1; + if(!arg14.compare(argv[i])) + args.dososm=1; + if(!arg15.compare(argv[i])) + args.dointerp=1; + if(!arg16.compare(argv[i])) + { + args.doc2=1; + fc21=argv[i+1]; + } + if(!arg17.compare(argv[i])) + fc22=argv[i+1]; + //Third channel option + if(!arg19.compare(argv[i])) + { + args.doc3=1; + fc31=argv[i+1]; + } + if(!arg20.compare(argv[i])) + fc32=argv[i+1]; + if(!arg22.compare(argv[i])) + { + args.alpha=atof(argv[i+1]); + } + if(!arg23.compare(argv[i])) + args.lambda=atof(argv[i+1]); + if(!arg24.compare(argv[i])) + args.scsig=atof(argv[i+1])*atof(argv[i+1]); + if(!arg25.compare(argv[i])) + { + args.alpha2=atof(argv[i+1]); + } + if(!arg26.compare(argv[i])) + { + args.lambdac=atof(argv[i+1]); + } + if(!arg27.compare(argv[i])) + { + args.fwinsize=atoi(argv[i+1]); + } + if(!arg28.compare(argv[i])) + { + args.poly_n=atoi(argv[i+1]); + } + if(!arg29.compare(argv[i])) + { + args.interpcth=0; + } + if(!arg30.compare(argv[i])) + { + args.doinv=1; + } + if(!arg31.compare(argv[i])) + { + args.doctt=1; + } + if(!arg32.compare(argv[i])) + { + args.kiters=atoi(argv[i+1]); + } + if(!arg38.compare(argv[i])) + { + args.liters=atoi(argv[i+1]); + } + if(!arg33.compare(argv[i])) + { + args.dozim=0; + } + if(!arg34.compare(argv[i])) + { + args.docorn=0; + } + if(!arg35.compare(argv[i])) + { + args.dofirstguess =1; + f1fg=argv[i+1]; + } + if(!arg36.compare(argv[i])) + { + args.rad=atoi(argv[i+1]); + } + if(!arg37.compare(argv[i])) + { + args.srad=atoi(argv[i+1]); + } + if(!arg39.compare(argv[i])) + { + args.deltat=atof(argv[i+1]); + } + if(!arg40.compare(argv[i])) + { + interploc=argv[i+1]; + } + if(!arg41.compare(argv[i])) + { + args.outnav=false; + } + if(!arg42.compare(argv[i])) + { + args.outraw=false; + } + if(!arg43.compare(argv[i])) + { + args.outrad=false; + } + if(!arg44.compare(argv[i])) + { + args.outctp=false; + } + if(!arg45.compare(argv[i])) + { + args.setdevice=atoi(argv[i+1])-1; + } + if(!arg46.compare(argv[i])) + { + args.NormMax=atof(argv[i+1]); + args.setNormMax=false; + } + if(!arg47.compare(argv[i])) + { + args.NormMin=atof(argv[i+1]); + args.setNormMin=false; + } + if(!arg48.compare(argv[i])) + { + args.NormMax2=atof(argv[i+1]); + 
+            args.setNormMax2=false;
+        }
+        if(!arg49.compare(argv[i]))
+        {
+            args.NormMin2=atof(argv[i+1]);
+            args.setNormMin2=false;
+        }
+        if(!arg50.compare(argv[i]))
+        {
+            args.NormMax3=atof(argv[i+1]);
+            args.setNormMax3=false;
+        }
+        if(!arg51.compare(argv[i]))
+        {
+            args.NormMin3=atof(argv[i+1]);
+            args.setNormMin3=false;
+        }
+        if(!arg52.compare(argv[i]))
+        {
+            outdir=argv[i+1];
+        }
+
+    }
+    //quick check for multi-channel files
+    if((args.doc2 == 1) && (fc22.empty()))
+    {
+        printf(""Missing files for second channel...stopping \n"");
+        exit(0);
+    }
+    if((args.doc3 == 1) && (fc32.empty()))
+    {
+        printf(""Missing files for third channel...stopping \n"");
+        exit(0);
+    }
+    //Set the optical flow type for the output file based on input command line arguments
+    if(args.farn == 1)
+    {
+        // 2 for farneback, which will be removed from OCTANE to remove dependencies on OPENCV
+        args.oftype=2;
+    } else{
+        //1 for Zimmer
+        args.oftype=1;
+        if(args.dozim == 0)
+        {
+            //3 for Brox
+            args.oftype=3;
+        }
+    }
+    if(args.dososm == 1)
+    {
+        args.oftype=4;
+    }
+    if(args.dopolar == 1)
+    {
+        args.doCTH = 0;
+    }
+    if(args.domerc == 1)
+    {
+        args.doCTH = 0;
+    }
+    if(args.doahi == 1)
+    {
+        args.doCTH = 0;
+    }
+
+    cout <<""Here are the file names being used: \n"";
+    cout <<""File 1 : "" << f1 << endl;
+    cout <<""File 2 : "" << f2 << endl;
+    //First step is to read the input data; oct_fileread (GOES/polar/merc readers) fills the GOESVar variables
+
+    t = oct_fileread(f1,args.ftype,""RAW"",1,1,goesData,args);
+    t = oct_fileread(f2,args.ftype,""RAW"",0,1,goesData2,args);
+
+    if((args.dopolar==0) && (args.domerc==0))
+    {
+        goesData.nav.g2xOffset = goesData2.nav.xOffset;
+        goesData.nav.g2yOffset = goesData2.nav.yOffset;
+    }
+    //This is a reader for ancillary cloud-top height information, which gets stored in the goesData objects
+    if(args.doCTH == 1){
+        t = oct_fileread(f1c,""CLAVRX"",""RAW"",0,0,goesData,args);
+    }
+    if(args.dofirstguess == 1){
+        t = oct_fileread(f1fg,""FIRSTGUESS"",""RAW"",0,0,goesData,args);
+    }
+
+    //Below are readers for multi-channel inputs. Currently, 3 channels are the most allowed
+    if(args.doc2 == 1)
+    {
+        //I will need to add this support later -J. Apke 2/11/2022
+        if(args.domerc == 1)
+        {
+            printf(""Mercator multi-channel not compatible with this version, use single channel only\n"");
+            exit(0);
+        }
+        t = oct_fileread(fc21,args.ftype,""RAW"",1,2,goesData,args);
+        t = oct_fileread(fc22,args.ftype,""RAW"",0,2,goesData2,args);
+    }
+    if(args.doc3 == 1)
+    {
+        if(args.domerc == 1)
+        {
+            printf(""Mercator multi-channel not compatible with this version, use single channel only\n"");
+            exit(0);
+        }
+        t = oct_fileread(fc31,args.ftype,""RAW"",1,3,goesData,args);
+        t = oct_fileread(fc32,args.ftype,""RAW"",0,3,goesData2,args);
+    }
+
+    //The function below is the primary optical flow computation algorithm. 
It reads in the goesData objects for each image (goesData and + //goesData2), and the arguments provided for the optical flow settings, and fills the uval and vval arrays in the goesData objects + t3 = oct_optical_flow(goesData,goesData2,args); + + //The interpolated files have slightly different output in filewrite, hence the setting below + args.putinterp = 0; + string outname = outdir+""outfile.nc""; + if(args.ftype==""POLAR"") outname=outdir+""outfile_polar.nc""; + if(args.ftype==""MERC"") outname=outdir+""outfile_merc.nc""; + //Writes the output file + t = oct_filewrite(outname,args.ftype,goesData,args); + cout << outname << "" written\n""; + + if(args.dointerp == 1) + { + //loop through each individual interp file requested, and write it out + cout << ""Interpolation flag on, be warned, this part is still under development!!!\n""; + int fwriteint = 1; + float deltat = args.deltat; //deltat in seconds that we want to change + float frt = deltat/goesData.dT; + cout << ""Outputting files every "" << deltat << "" seconds\n""; + + args.putinterp = 1; + while((frt < 1.) && ((1.-frt) >= ((deltat/goesData.dT)/2.))) + { + std::stringstream interpoutlocstr; + interpoutlocstr << fwriteint; + string interpoutloc; + t3 = oct_interp(goesData,goesData2,frt,args); + if(args.dopolar == 1) + { + interpoutloc = interploc+""/outfile_interp_polar""+interpoutlocstr.str()+"".nc""; + } else + { + interpoutloc = interploc+""/outfile_interp""+interpoutlocstr.str()+"".nc""; + } + t = oct_filewrite(interpoutloc,args.ftype,goesData,args); + fwriteint++; + frt += deltat/goesData.dT; + cout << interpoutloc << "" written"" << endl; + cout << ""FRT "" << frt << "" out of 1 "" << endl; + } + + } + cout << ""OCTANE completed, exiting\n""; + + return 0; +} +","Unknown" +"Nowcasting","JasonApke/OCTANE","src/oct_gaussian.cc",".cc","6623","225","#include +#include +#include +#include +#include ""util.h"" +#include ""oct_gaussian.h"" +#include ""oct_bc.h"" +using namespace std; +//Purpose: A collection of functions for gaussian smoothing on the CPU. 
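+//Background: the 1-D kernel below is G(x) = exp(-x*x/(2*sigma*sigma)), scaled by
+//1/(pi*2*sigma*sigma) and then normalized to sum to 1. The 2-D blur is applied
+//separably (a horizontal pass into a scratch buffer, then a vertical pass), which
+//costs O(2k) kernel taps per pixel instead of O(k*k) for a k-wide kernel. A sketch
+//of the separable idea:
+//  tmp(i,j) = sum over k of G[k]*img(i+k,j)   (horizontal pass)
+//  out(i,j) = sum over l of G[l]*tmp(i,j+l)   (vertical pass)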
+//Note: Variational_Optical_Flow now performs this on the GPU (faster), so below
+//are only used for file-read image smoothing now
+void oct_getGaussian(double **GKernel,int wk,double sigma)
+{
+    double r, s = 2.0 * sigma * sigma;
+
+    // sum is for normalization
+    double sum = 0.0;
+    int wk2 = (wk-1)/2;
+    // generating wk x wk kernel
+    for (int x = -wk2; x <= wk2; x++) {
+        for (int y = -wk2; y <= wk2; y++) {
+            r = sqrt(x * x + y * y);
+            GKernel[x + wk2][y + wk2] = (exp(-(r * r) / s)) / (M_PI * s);
+            sum += GKernel[x + wk2][y + wk2];
+        }
+    }
+
+    // normalising the Kernel
+    for (int i = 0; i < wk; ++i)
+        for (int j = 0; j < wk; ++j)
+            GKernel[i][j] /= sum;
+}
+
+void oct_getGaussian_1D(double *GKernel,int wk,double sigma)
+{
+    double r, s = 2.0 * sigma * sigma;
+
+    double sum = 0.0;
+    int wk2 = (wk-1)/2;
+    for (int x = -wk2; x <= wk2; x++) {
+        r = x;
+        GKernel[x + wk2] = (exp(-(r * r) / s)) / (M_PI * s);
+        sum += GKernel[x + wk2];
+    }
+
+    for (int i = 0; i < wk; ++i)
+        GKernel[i] /= sum;
+}
+void oct_gaussian(double * image, int nx, int ny,double sigma)
+{
+    double * geosub112;
+    double *GK;
+    bool bc;
+    int filtsize = (int) 2*sigma;
+    if (filtsize <5)
+        filtsize=5;
+    geosub112 = new double[nx*ny];
+
+    GK = new double [2*filtsize+1];
+    oct_getGaussian_1D(GK,2*filtsize+1,sigma);
+    for(int ii2 = 0; ii2 < nx; ii2++)
+    {
+        for(int jj2 = 0; jj2 < ny; jj2++)
+        {
+            long lxyz = ii2+nx*jj2;
+            double wsum = 0.;
+            for(int kk2 = -filtsize; kk2 <= filtsize; ++kk2)
+            {
+                int iiv = (int) oct_bc<double>(ii2+kk2,nx,bc);
+                int lxyz_en = iiv+nx*jj2; //(ii2+kk2)+nx*(jj2+ll2);
+                wsum = wsum+GK[kk2+filtsize]*image[lxyz_en];
+            }
+            //average
+            //vertical convolution
+            geosub112[lxyz] = wsum;
+
+
+        }
+    }
+    for(int ii23 = 0; ii23 < nx; ii23++)
+    {
+        for(int jj23 = 0; jj23 < ny; jj23++)
+        {
+            long lxyz = ii23+nx*jj23;
+            double wsum = 0.;
+            for(int ll2 = -filtsize; ll2 <= filtsize; ++ll2)
+            {
+                int jjv = (int) oct_bc<double>(jj23+ll2,ny,bc);
+                long lxyz_en = ii23+nx*jjv;
+                wsum = wsum+GK[ll2+filtsize]*geosub112[lxyz_en];
+            }
+            image[lxyz] = wsum;
+        }
+    }
+
+
+    delete [] geosub112;
+    delete [] GK;
+}
+void oct_gaussian2(double ** image, int nx, int ny,double sigma,int nchan)
+{
+    //A gaussian function specifically designed for multi-channel
+    double * geosub112;
+    double *GK;
+    bool bc;
+    int filtsize = (int) 2*sigma;
+    if (filtsize <5)
+        filtsize=5;
+    geosub112 = new double[nx*ny];
+
+    GK = new double [2*filtsize+1];
+    oct_getGaussian_1D(GK,2*filtsize+1,sigma);
+    for(int nc = 0; nc< nchan; nc++)
+    {
+        for(int ii2 = 0; ii2 < nx; ii2++)
+        {
+            for(int jj2 = 0; jj2 < ny; jj2++)
+            {
+                long lxyz = ii2+nx*jj2;
+                double wsum = 0.;
+                for(int kk2 = -filtsize; kk2 <= filtsize; ++kk2)
+                {
+                    int iiv = (int) oct_bc<double>(ii2+kk2,nx,bc);
+                    int lxyz_en = iiv+nx*jj2;
+                    wsum = wsum+GK[kk2+filtsize]*image[lxyz_en][nc];
+                }
+                geosub112[lxyz] = wsum;
+
+
+            }
+        }
+        for(int ii23 = 0; ii23 < nx; ii23++)
+        {
+            for(int jj23 = 0; jj23 < ny; jj23++)
+            {
+                long lxyz = ii23+nx*jj23;
+                double wsum = 0.;
+                for(int ll2 = -filtsize; ll2 <= filtsize; ++ll2)
+                {
+                    int jjv = (int) oct_bc<double>(jj23+ll2,ny,bc);
+                    long lxyz_en = ii23+nx*jjv;
+                    wsum = wsum+GK[ll2+filtsize]*geosub112[lxyz_en];
+                }
+                image[lxyz][nc] = wsum;
+            }
+        }
+    }//end channel number move
+
+
+    delete [] geosub112;
+    delete [] GK;
+}
+void oct_gaussian_2d(double * image, int nx, int ny,double sigma)
+{
+    double * geosub11;
+    double **GK;
+    int filtsize = 5; // convolution filter size
+    bool bc;
+    geosub11 = new double[nx*ny];
+
+    GK = dMatrix(2*filtsize+1,2*filtsize+1);
+    oct_getGaussian(GK,2*filtsize+1,sigma);
+    if(GK[0][0] != GK[0][0])
+    {
+        cout << ""Ok, Kernel Failed Here "" << sigma << endl;
+        exit(0);
+    }
+    for(int ii2 = 0; ii2 < nx; ii2++)
+    {
+        for(int jj2 = 0; jj2 < ny; jj2++)
+        {
+            long lxyz = ii2+nx*jj2;
+            double wsum = 0.;
+            for(int kk2 = -filtsize; kk2 <= filtsize; ++kk2)
+            {
+                for(int ll2 = -filtsize; ll2 <= filtsize; ++ll2)
+                {
+                    int iiv = (int) oct_bc<double>(ii2+kk2,nx,bc);
+                    int jjv = (int) oct_bc<double>(jj2+ll2,ny,bc);
+                    int lxyz_en = iiv+nx*jjv;
+                    wsum = wsum+GK[kk2+filtsize][ll2+filtsize]*image[lxyz_en];
+                }
+            }
+            //average
+            geosub11[lxyz] = wsum;
+            if( wsum != wsum)
+            {
+                for(int kk22 = -filtsize; kk22 < filtsize; ++kk22)
+                {
+                    for(int ll2 = -filtsize; ll2 < filtsize; ++ll2)
+                    {
+                        int iiv = (int) oct_bc<double>(ii2+kk22,nx,bc);
+                        int jjv = (int) oct_bc<double>(jj2+ll2,ny,bc);
+                        int lxyz_en = iiv+nx*jjv;
+                        cout <","Unknown"
+"Nowcasting","JasonApke/OCTANE","src/oct_sose.cc",".cc","4842","143","#include
+#include
+#include
+#include
+#include
+#include ""image.h""
+#include ""goesread.h""
+#include ""util.h""
+#include ""oct_bc.h"" 
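+//Background for the matching code in this file: jsose scores a candidate
+//displacement (n,m) as the sum of squared differences over a (2*rad+1)^2 patch,
+//and candidates are visited in an outward spiral around the first guess.
+//Sub-pixel refinement then fits a parabola y = a*x^2 + b*x + c through the best
+//score and its two neighbors and takes the vertex x = -b/(2a); e.g. scores
+//(4, 1, 4) at x = (-1, 0, 1) give a = 3, b = 0, so the minimum stays at x = 0,
+//while asymmetric scores shift it toward the lower-scoring side.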
+#include ""offlags.h"" +using namespace std; +double jsose(double **geo1,double **geo2,int i, int j, int n,int m, int nx, int ny, int rad) +{ + bool bc; + double sose=0; + for(int k = 0; k < 2*rad+1; k++) + { + for(int l = 0; l < 2*rad+1; l++) + { + //i+k-rad will need boundary conditions if they are out of bounds + //note oct_bc is a boundary condition check, so ic1/jc1,ic2/jc2 will always be in bounds + int ic1 = (int) oct_bc(i+k-rad,nx,bc); + int jc1 = (int) oct_bc(j+l-rad,ny,bc); + int ic2 = (int) oct_bc(i+k+n-rad,nx,bc); + int jc2 = (int) oct_bc(j+l+m-rad,ny,bc); + double sos1 = geo2[ic2][jc2]-geo1[ic1][jc1]; + + sose += sos1*sos1; + } + } + + return sose; +} + +double jquad_interp(double y2,double y1,double y3,double x2,double x1, double x3) +{ + double result; + //Quadratic interpolation function, we are trying to find the x location of the minimum where + // y = ax ^2 + bx + c + // solving for where the derivative is 0, or when + // 0 = 2 a x + b or + // -b/2a = x + double C1 = (y2-y1)/(x2-x1); + double C2 = (x2*x2-x1*x1)/(x2-x1); + double a = (y3-C1*x3-y1+C1*x1)/(x3*x3-C2*x3-x1*x1+C2*x1); + double b = C1 - a * C2; + //double c = y1-a*pow(x1,2) - b*x1; + if(a == 0) + { + return x2; + } else + { + return -b/(2.*a); + } +} +void oct_patch_match_optical_flow(float *geo1i,float *geo2i,float *uarr,float *varr, int nx, int ny,OFFlags args) +{ + //inputs: geo1i/2i = image data from the first and second scan + // defarr2: Initial guess for where the searcher needs to center the search region + // nx: size of x dimension of the image + // ny: size of y dimension of the image + // args: an object containing the arguments input by the user + //outputs: uarr/varr, pixel displacements in geo1i dimensions + + double **geo1,**geo2; + geo1 = dMatrix(nx,ny); + geo2 = dMatrix(nx,ny); + int rad = args.rad; //important, box is 2*rad+1 x 2*rad+1 + int srad = args.srad; //Search radius around your initial guess from defarr2 + int SX = 2*srad+1; + int SY = 2*srad+1; + int SXD2 = SX/2; + int SYD2 = SY/2; + double summin,sumv,sumv1,sumv2; + int nmin,mmin; + //Un-raveling for code simplicity + for(int j = 0; j < ny; j++){ + long jtnx = j*nx; + for(int i = 0; i < nx; i++) + { + long lxyz = i+jtnx; + geo1[i][j] = geo1i[lxyz]; + geo2[i][j] = geo2i[lxyz]; + } + } + + for(int j = 0; j < ny; j++) + { + long jtnx = j*nx; + for(int i = 0; i < nx; i++) + { + long lxyz = i+jtnx; + int n = 0; + int m = 0; + int dn = 0; + int dm = -1; + bool bc; + int ibc = (int) oct_bc(i+uarr[lxyz],nx,bc); + int jbc = (int) oct_bc(j+varr[lxyz],ny,bc); + + bool sumcheck = false; + for(int ic = 0; ic< pow(max(SX,SY),2); ic++) + { + if( (-SXD2 < n <= SXD2) && (-SYD2 < m <= SYD2)) + { + sumv = jsose(geo1,geo2,ibc,jbc,n,m,nx,ny,rad); + if(sumcheck) + { + if(sumv < summin) + { + nmin = n; + mmin = m; + summin = sumv; + } + } else + { + summin = sumv; + nmin = n; + mmin = m; + sumcheck = true; + } + } + if( (n == m) || ((n < 0) && (n == -m)) || ((n > 0) && (n == 1-m))) + { + int odn = dn; + dn = -dm; + dm = odn; + } + n +=dn; + m +=dm; + } + + sumv1 = jsose(geo1,geo2,ibc,jbc,nmin+1,mmin,nx,ny,rad); + sumv2 = jsose(geo1,geo2,ibc,jbc,nmin-1,mmin,nx,ny,rad); + + if((summin < sumv1) && (summin < sumv2)) + { + uarr[lxyz] = jquad_interp(summin,sumv1,sumv2,(double) (i+nmin),(double) (i+nmin+1),(double)(i+nmin-1))-(double) i; + } else { + uarr[lxyz] = nmin; + } + sumv1 = jsose(geo1,geo2,ibc,jbc,nmin,mmin+1,nx,ny,rad); + sumv2 = jsose(geo1,geo2,ibc,jbc,nmin,mmin-1,nx,ny,rad); + if((summin < sumv1) && (summin < sumv2)) + { + varr[lxyz] = 
+            } else{
+                varr[lxyz] = mmin;
+            }
+
+
+        }
+    }
+    free_dMatrix(geo1,nx);
+    free_dMatrix(geo2,nx);
+}
+","Unknown"
+"Nowcasting","JasonApke/OCTANE","src/oct_binterp.cc",".cc","1401","42","#include
+#include
+#include
+using namespace std;
+
+//Function: oct_binterp
+//Purpose: This is a C++ function to perform bilinear interpolation
+//Returns: Bilinear interpolation of value at point x, y as a double; oct_binterp_coefs and oct_coef_binterp are functions which
+//         keep and reuse all coefficients for computational efficiency when needed
+//
+//Author: Jason Apke, Updated 9/10/2018
+double oct_binterp (double x, double y,double x1, double x2, double y1, double y2,double f11,double f21,double f12,double f22)
+{
+    double fv1,fv2,ans;
+    double p1 = (x2-x)/(x2-x1);
+    double p2 = (x-x1)/(x2-x1);
+    fv1 = (p1)*f11+(p2)*f21;
+    fv2 = (p1)*f12+(p2)*f22;
+    ans = ((y2-y)/(y2-y1))*fv1+((y-y1)/(y2-y1))*fv2;
+    return ans;
+}
+double oct_binterp_coefs (double x, double y,double x1, double x2, double y1, double y2,double f11,double f21,double f12,double f22,double & p1, double & p2,double & p3,double & p4)
+{
+    double fv1,fv2,ans;
+    p1 = (x2-x)/(x2-x1);
+    p2 = (x-x1)/(x2-x1);
+    fv1 = (p1)*f11+(p2)*f21;
+    fv2 = (p1)*f12+(p2)*f22;
+    p3 = ((y2-y)/(y2-y1));
+    p4 = ((y-y1)/(y2-y1));
+    ans = p3*fv1+p4*fv2;
+    return ans;
+}
+double oct_coef_binterp(double p1, double p2, double p3, double p4, double f11,double f21,double f12,double f22)
+{
+    double fv1, fv2, ans;
+    fv1 = (p1)*f11+(p2)*f21;
+    fv2 = (p1)*f12+(p2)*f22;
+    ans = p3*fv1+p4*fv2;
+    return ans;
+}
+","Unknown"
+"Nowcasting","JasonApke/OCTANE","src/oct_normalize_geo.cc",".cc","2787","103","#include
+#include
+#include
+using namespace std;
+//Function: oct_normalize_geo
+//Purpose: This function normalizes a geo image to values between maxout and minout
+//Author: Jason Apke, Updated 9/10/2018
+
+void oct_bandminmax(int gb, float &maxch,float &minch)
+{
+    if(gb == 1)
+    {
+        maxch = 804.03605737;
+        minch = -25.93664701;
+    } else if(gb == 2)
+    {
+        maxch = 628.98723908;
+        minch = -20.28991094;
+    } else if(gb == 3)
+    {
+        maxch = 373.16695681;
+        minch = -12.03764377;
+    } else if(gb == 4)
+    {
+        maxch = 140.19342584;
+        minch = -4.52236858;
+    } else if(gb == 5)
+    {
+        maxch = 94.84802665;
+        minch = -3.05961376;
+    } else if(gb == 6)
+    {
+        maxch = 29.78947040;
+        minch = -0.96095066;
+    } else if(gb == 7)
+    {
+        //maxch = 24.962;
+        //minch = -0.0114;
+        //Above is actual range
+        //below is meteorological range
+        maxch = 2.;
+        minch = 0.;
+    } else if(gb == 8)
+    {
+        //These are documented
+        //maxch = 28.366;
+        //minch = -0.1692;
+        //This is an experimental meteorological range
+        maxch = 6.;
+        minch = 3.;
+    } else if(gb == 9)
+    {
+        maxch = 44.998;
+        minch = -0.2472;
+    } else if(gb == 10)
+    {
+        maxch = 79.831;
+        minch = -0.2871;
+    } else if(gb == 11)
+    {
+        maxch = 134.93;
+        minch = -0.3909;
+    } else if(gb == 12)
+    {
+        maxch = 108.44;
+        minch = -0.4617;
+    } else if(gb == 13)
+    {
+        maxch = 185.5699;
+        minch = -1.6443;
+    } else if(gb == 14)
+    {
+        maxch = 198.71;
+        minch = -0.5154;
+    } else if(gb == 15)
+    {
+        maxch = 212.28;
+        minch = -0.5262;
+    } else if(gb == 16)
+    {
+        maxch = 170.19;
+        minch = -1.5726;
+    }
+}
+//Note, the normalize function does not censor above or below the min/max, just scales the data such that
+//maxin = maxout, minin = minout
+void oct_normalize_geo(double ** image, double maxin, double minin,double maxout,double 
minout,int nx,int ny,int nc) +{ + for (int jj2 = 0; jj2np.sum(nan_mask): + s = np.sum(nan_mask) + else: + s = sample_size + + sample_idx = np.random.choice(np.sum(nan_mask), + replace=False, + size=s) + yhat_ = yhat_map[:, nan_mask][:, sample_idx] + y_ = y_map[nan_mask][sample_idx] + ub = np.nanquantile(yhat_, 1 - (1 - ci) / 2, axis=0) + lb = np.nanquantile(yhat_, (1 - ci) / 2, axis=0) + cond = (y_ <= ub) & (y_ >= lb) + return np.sum(cond) / s, np.mean(ub - lb) + + +def compute_CRPS(yhat_map, + y_map): + pred = yhat_map.reshape(yhat_map.shape[0], + yhat_map.shape[1], + -1).T + + obs = y_map.reshape(y_map.shape[0], + -1).T + crps = ps.crps_ensemble(obs, pred).T.reshape(y_map.shape) + return crps + + +def compute_fss(yhat_map, + y_map, + thresh, + scale, + inverse=None): + if inverse is None: + return fss(yhat_map, + y_map, + thr=thresh, + scale=scale) + else: + return fss(inverse-yhat_map, + inverse-y_map, + thr=inverse-thresh, + scale=scale) + + + +def compute_CSI(yhat_map, + y_map, + thresh, + inverse=None): + if inverse is None: + return det_cat_fct(yhat_map, + y_map, + thr=thresh, + scores='CSI')['CSI'] + else: + return det_cat_fct(inverse-yhat_map, + inverse-y_map, + thr=inverse-thresh, + scores='CSI')['CSI'] + + +def compute_rmse(yhat_map, + y_map): + return np.sqrt(np.nanmean((yhat_map - y_map) ** 2)) + + +def compute_bias(yhat_map, + y_map): + diff = yhat_map - y_map + return np.nanmean(diff), np.nanmax(diff), np.nanmin(diff) + +def compute_dist_distance(yhat, y, mmd, idx=None): + # compute mmd distance for two sequences of images images + if idx is not None: + yhat = yhat[:, idx, idx] + y = y[:, idx, idx] + if not isinstance(yhat, torch.Tensor): + yhat = torch.Tensor(yhat) + y = torch.Tensor(y) + mmd_lst = [] + for yhat_, y_ in zip(yhat, y): + mmd_lst.append(mmd(yhat_.view(-1,1), y_.view(-1,1)).detach().numpy()) + return np.array(mmd_lst) + +def compute_ens_dist_distance(ens_yhat, y, mmd, idx=None): + # compute mmd distance for an ensemble of forecasts + d_lst = [] + for yhat in ens_yhat: + d = compute_dist_distance(yhat, y, mmd, idx) + d_lst.append(d) + return np.nanmean(d_lst, axis=0), np.nanstd(d_lst, axis=0) + +def compute_ensemble_metrics(yhat, + real, + metrics=['crps', 'picp-pinaw', 'rmse', 'csi-fss', 'mmd'], + picp_sample_size=1000, + confidence_interval=0.9, + scale_lst=(1, 2, 4, 8, 16, 32, 64), + threshold_lst=(0.3, 0.6, 0.9), + inverse_lst=[1.2, None, None], + mmd_idx=np.arange(0,128,2), + mmd=None, + rankhist_dict={}): + result_dict = {} + + y = real.copy() + y[np.isnan(yhat[0])] = np.nan + # PICP and PINAW + if 'picp-pinaw' in metrics: + picp_pinaw = [compute_picp_pinaw(yhat[:, j], + y[j], + sample_size=picp_sample_size, + ci=confidence_interval) for j in range(len(y))] + picp = np.array(picp_pinaw)[:, 0] + pinaw = np.array(picp_pinaw)[:, 1] + result_dict['picp'] = picp + result_dict['pinaw'] = pinaw + + if 'crps' in metrics: + crps_maps = [compute_CRPS(yhat[:, j], + y[j]) for j in range(len(y))] + result_dict['crps_map'] = crps_maps + result_dict['avg_crps'] = np.nanmean(crps_maps, axis=(1, 2)) + + if 'rmse' in metrics: + rmse = np.sqrt(np.nanmean((np.nanmean(yhat, axis=0)-y)**2, axis=(1,2))) + result_dict['rmse'] = np.array(rmse) + + if 'bias' in metrics: + bias = np.array([compute_bias(np.nanmean(yhat[:, j], axis=0), + y[j]) for j in range(len(y))]) + result_dict['avg_bias'] = bias[:, 0] + result_dict['max_bias'] = bias[:, 1] + result_dict['min_bias'] = bias[:, 2] + + if 'csi' in metrics: + csi_dict = {} + for t,inv, in zip(threshold_lst, inverse_lst): + csi_lst = 
[] + for yhat_ in yhat: + csi = np.array([compute_CSI(yhat_[j], + y[j], + t, + inverse=inv) for j in range(len(y))]) + csi_lst.append(csi) + csi_dict[t] = (np.nanmean(csi_lst, axis=0), np.nanstd(csi_lst, axis=0)) + result_dict['csi'] = csi_dict + + if 'fss' in metrics: + fss_dict = {} + for t,inv, in zip(threshold_lst, inverse_lst): + fss_dict[t] = {} + for scale in scale_lst: + fss_lst = [] + for yhat_ in yhat: + fs_score = np.array([compute_fss(yhat_[j], + y[j], + t, + inverse=inv, + scale=scale) for j in range(len(y))]) + fss_lst.append(fs_score) + fss_dict[t][scale] = (np.nanmean(fss_lst, axis=0), np.nanstd(fss_lst, axis=0)) + result_dict['fss'] = fss_dict + + if 'mmd' in metrics: + mmd_loss = compute_ens_dist_distance(yhat, y, mmd, mmd_idx) + result_dict['mmd'] = mmd_loss + + if 'rankhist' in metrics: + for step in range(yhat.shape[1]): + verification.rankhist_accum(rankhist_dict[step], yhat[:,step], y[step]) + + if 'spread-skill' in metrics: + rmse = np.sqrt((np.nanmean(yhat, axis=0)-y)**2) + sd = np.std(yhat, axis=0) + skill = np.nanmean(rmse/sd, axis=(1,2)) + result_dict['spread-skill'] = skill + return result_dict + +def compute_rankhist(rankhist_dict, yhat, y): + for step in range(yhat.shape[1]): + verification.rankhist_accum(rankhist_dict[step], yhat[:,step], y[step]) + +def compute_det_metrics(yhat, + y, + scale_lst=(1, 2, 4, 8, 16, 32, 64), + threshold_lst=(0.3, 0.6, 0.9)): + result_dict = {} + rmse = [compute_rmse(yhat[j], + y[j]) for j in range(len(y))] + bias = np.array([compute_bias(yhat[j], + y[j]) for j in range(len(y))]) + result_dict['rmse'] = np.array(rmse) + result_dict['avg_bias'] = bias[:, 0] + result_dict['max_bias'] = bias[:, 1] + result_dict['min_bias'] = bias[:, 2] + + csi_dict = {} + fss_dict = {} + for t in threshold_lst: + csi = np.array([compute_CSI(yhat[j], + y[j], + t) for j in range(len(y))]) + csi_dict[t] = csi + fss_dict[t] = {} + for scale in scale_lst: + fs_score = np.array([compute_fss(yhat[j], + y[j], + t, + scale) for j in range(len(y))]) + fss_dict[t][scale] = fs_score + result_dict['csi'] = csi_dict + result_dict['fss'] = fss_dict + return result_dict + + +def init_reldiagrams(thresh_lst): + reldiag_dict = {} + for t in thresh_lst: + reldiag_dict[t] = reldiag_init(t) + return reldiag_dict + + +def accum_reldiagrams(yhat, + y, + reldiag_dict): + for t in reldiag_dict: + prob = ensemblestats.excprob(yhat, t, ignore_nan=True) + reldiag_accum(reldiag_dict[t], prob, y) +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","validation_utils.py",".py","5805","120","from Dataset.dataset import KIDataset +from torch.utils.data import DataLoader +from SHADECast.Models.Nowcaster.Nowcast import AFNONowcastNetCascade, Nowcaster, AFNONowcastNet + +from SHADECast.Models.VAE.VariationalAutoEncoder import VAE, Encoder, Decoder +from SHADECast.Models.UNet.UNet import UNetModel +from SHADECast.Models.Diffusion.DiffusionModel import LatentDiffusion +from utils import open_pkl, save_pkl + +def get_dataloader(data_path, + coordinate_data_path, + n=12, + min=0.05, + max=1.2, + length=None, + norm_method='rescaling', + num_workers=24, + batch_size=64, + shuffle=True, + validation=False): + dataset = KIDataset(data_path=data_path, + n=n, + min=min, + max=max, + length=length, + norm_method=norm_method, + coordinate_data_path=coordinate_data_path, + return_all=False, + forecast=True, + validation=validation) + dataloader = DataLoader(dataset, + num_workers=num_workers, + batch_size=batch_size, + shuffle=shuffle) + return dataloader, dataset + + +def 
get_diffusion_model(config_path, ldm_path): + + config = open_pkl(config_path) + encoder_config = config['Encoder'] + encoder = Encoder(in_dim=encoder_config['in_dim'], + levels=encoder_config['levels'], + min_ch=encoder_config['min_ch'], + max_ch=encoder_config['max_ch']) + print('Encoder built') + + decoder_config = config['Decoder'] + decoder = Decoder(in_dim=decoder_config['in_dim'], + levels=decoder_config['levels'], + min_ch=decoder_config['min_ch'], + max_ch=decoder_config['max_ch']) + + print('Decoder built') + + vae_config = config['VAE'] + vae = VAE.load_from_checkpoint(vae_config['path'], + encoder=encoder, decoder=decoder, + opt_patience=5) + + print('VAE built') + + nowcaster_config = config['Nowcaster'] + if nowcaster_config['path'] is None: + nowcast_net = AFNONowcastNet(vae, + train_autoenc=False, + embed_dim=nowcaster_config['embed_dim'], + embed_dim_out=nowcaster_config['embed_dim'], + analysis_depth=nowcaster_config['analysis_depth'], + forecast_depth=nowcaster_config['forecast_depth'], + input_steps=nowcaster_config['input_steps'], + output_steps=nowcaster_config['output_steps'], + ) + else: + nowcast_net = AFNONowcastNet(vae, + train_autoenc=False, + embed_dim=nowcaster_config['embed_dim'], + embed_dim_out=nowcaster_config['embed_dim'], + analysis_depth=nowcaster_config['analysis_depth'], + forecast_depth=nowcaster_config['forecast_depth'], + input_steps=nowcaster_config['input_steps'], + output_steps=nowcaster_config['output_steps'], + ) + nowcaster = Nowcaster.load_from_checkpoint(nowcaster_config['path'], nowcast_net=nowcast_net, + opt_patience=nowcaster_config['opt_patience'], + loss_type=nowcaster_config['loss_type']) + nowcast_net = nowcaster.nowcast_net + + cascade_net = AFNONowcastNetCascade(nowcast_net=nowcast_net, + cascade_depth=nowcaster_config['cascade_depth']) + diffusion_config = config['Diffusion'] + denoiser = UNetModel( + in_channels=vae.hidden_width, + model_channels=diffusion_config['model_channels'], + out_channels=vae.hidden_width, + num_res_blocks=diffusion_config['num_res_blocks'], + attention_resolutions=diffusion_config['attention_resolutions'], + dims=diffusion_config['dims'], + channel_mult=diffusion_config['channel_mult'], + num_heads=8, + num_timesteps=2, + context_ch=cascade_net.cascade_dims) + + ldm = LatentDiffusion.load_from_checkpoint(ldm_path, + model=denoiser, + autoencoder=vae, + context_encoder=cascade_net, + beta_schedule=diffusion_config['scheduler'], + loss_type=""l2"", + use_ema=diffusion_config['use_ema'], + lr_warmup=0, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + parameterization='eps', + lr=diffusion_config['lr'], + timesteps=diffusion_config['noise_steps'], + opt_patience=diffusion_config['opt_patience'] + ) + return ldm, config","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","utils.py",".py","7587","224","import pickle as pkl +import numpy as np +import yaml +import torch +import torch.nn as nn + +ROOT_PATH = '/Users/cea3/Desktop/Projects/GenerativeModels/' + +def open_pkl(path: str): + with open(path, 'rb') as o: + pkl_file = pkl.load(o) + return pkl_file + + +def save_pkl(path: str, obj): + with open(path, 'wb') as o: + pkl.dump(obj, o) + + +def open_yaml(path: str): + with open(path) as o: + yaml_file = yaml.load(o, Loader=yaml.FullLoader) + return yaml_file + + +def activation(act_type=""swish""): + act_dict = {""swish"": nn.SiLU(), + ""gelu"": nn.GELU(), + ""relu"": nn.ReLU(), + ""tanh"": nn.Tanh()} + if act_type: + if act_type in act_dict: + return act_dict[act_type] + else: + raise 
NotImplementedError(act_type) + elif not act_type: + return nn.Identity() + + +def normalization(channels, norm_type=""group"", num_groups=32): + if norm_type == ""batch"": + return nn.BatchNorm3d(channels) + elif norm_type == ""group"": + return nn.GroupNorm(num_groups=num_groups, num_channels=channels) + elif (not norm_type) or (norm_type.lower() == 'none'): + return nn.Identity() + else: + raise NotImplementedError(norm_type) + + +def kl_from_standard_normal(mean, log_var): + kl = 0.5 * (log_var.exp() + mean.square() - 1.0 - log_var) + return kl.mean() + + +def sample_from_standard_normal(mean, log_var, num=None): + std = (0.5 * log_var).exp() + shape = mean.shape + if num is not None: + # expand channel 1 to create several samples + shape = shape[:1] + (num,) + shape[1:] + mean = mean[:, None, ...] + std = std[:, None, ...] + return mean + std * torch.randn(shape, device=mean.device) + + +# def get_full_images(date, val_data_path, coordinate_data_path, n_patches=18): + +# patches = [] +# times = [] +# coords = [] +# starting_idx_lst = [] +# for i in range(n_patches): +# patch = open_pkl(val_data_path+date+'_'+str(i)+'.pkl') +# lat = open_pkl(coordinate_data_path+str(i)+'_lat.pkl') +# lon = open_pkl(coordinate_data_path+str(i)+'_lon.pkl') +# maps = 2 * ((patch['ki_maps'] - 0.05) / (1.2 - 0.05)) - 1 +# patches.append(maps) +# t = 2 * ((patch['sza'] - 0) / (90 - 0)) - 1 +# times.append(t) +# lon = 2 * ((lon - 0) / (90 - 0)) - 1 +# lat = 2 * ((lat - 0) / (90 - 0)) - 1 +# coords.append((lon, lat)) +# starting_idx_lst.append(patch['starting_idx']) +# common_starting_idx_lst = list(set.intersection(*map(set, starting_idx_lst))) +# patches = np.array(patches) +# patches = patches[:, 4:] +# times = np.array(times) +# times = np.nanmean(times[:, 4:], axis=0) + +# full_image = np.empty((patches.shape[1], 128*3, 128*6)) +# full_lat = np.empty((128*3, 128*6)) +# full_lon = np.empty((128*3, 128*6)) + +# k = 0 +# for i in range(3): +# for j in range(6): +# full_image[:, 128*i:128*(i+1), 128*j:128*(j+1)] = patches[k] +# full_lat[128*i:128*(i+1), 128*j:128*(j+1)] = coords[k][1] +# full_lon[128*i:128*(i+1), 128*j:128*(j+1)] = coords[k][0] +# k += 1 +# return full_image, full_lat, full_lon, times, common_starting_idx_lst + +patch_dict = {0: ((0, 128), (0, 128)), + 1: ((0, 128), (128, 256)), + 2: ((0, 128), (256, 384)), + 3: ((0, 128), (384, 512)), + 4: ((0, 128), (512, 640)), + 5: ((0, 128), (640, 768)), + 6: ((128, 256), (0, 128)), + 7: ((128, 256), (128, 256)), + 8: ((128, 256), (256, 384)), + 9: ((128, 256), (384, 512)), + 10: ((128, 256), (512, 640)), + 11: ((128, 256), (640, 768)), + 12: ((256, 384), (0, 128)), + 13: ((256, 384), (128, 256)), + 14: ((256, 384), (256, 384)), + 15: ((256, 384), (384, 512)), + 16: ((256, 384), (512, 640)), + 17: ((256, 384), (640, 768))} + +def get_full_images(date, + data_path='/scratch/snx3000/acarpent/HelioMontDataset/TestSet/KI/', + patches_idx=np.arange(18)): + + full_maps = np.empty((100, 128*3, 128*6))*np.nan + patches_lst = [] + starting_idx_lst = [] + starting_idx_lst = set(np.arange(100)) + + for p in patches_idx: + patch = open_pkl(data_path+date+'_'+str(p)+'.pkl') + maps = 2 * ((patch['ki_maps'] - 0.05) / (1.2 - 0.05)) - 1 + full_maps[:len(maps), patch_dict[p][0][0]:patch_dict[p][0][1], + patch_dict[p][1][0]:patch_dict[p][1][1]] = maps + starting_idx_lst = starting_idx_lst.intersection(set(patch['starting_idx'])) + + + time = patch['time'] + full_maps = full_maps[:len(time)] + x = ~np.isnan(full_maps).all(axis=(0, 2)) + full_maps = full_maps[:, x] + y = 
~np.isnan(full_maps).all(axis=(0, 1))
+    full_maps = full_maps[:, :, y]
+
+    return full_maps, starting_idx_lst, time
+
+def get_full_coordinates(data_path='/scratch/snx3000/acarpent/HelioMontDataset/CoordinateData/',
+                         patches_idx=np.arange(18),
+                         normalization=False):
+
+    full_lat = np.empty((128*3, 128*6))*np.nan
+    full_lon = np.empty((128*3, 128*6))*np.nan
+    full_alt = np.empty((128*3, 128*6))*np.nan
+    for p in patches_idx:
+        lat = open_pkl(data_path+str(p)+'_lat.pkl')
+        lon = open_pkl(data_path+str(p)+'_lon.pkl')
+        alt = open_pkl(data_path+str(p)+'_alt.pkl')
+        full_lat[patch_dict[p][0][0]:patch_dict[p][0][1],
+                 patch_dict[p][1][0]:patch_dict[p][1][1]] = lat
+        full_lon[patch_dict[p][0][0]:patch_dict[p][0][1],
+                 patch_dict[p][1][0]:patch_dict[p][1][1]] = lon
+        full_alt[patch_dict[p][0][0]:patch_dict[p][0][1],
+                 patch_dict[p][1][0]:patch_dict[p][1][1]] = alt
+
+    x = ~np.isnan(full_lat).all(axis=(0))
+    full_lat = full_lat[:, x]
+    full_lon = full_lon[:, x]
+    full_alt = full_alt[:, x]
+
+    y = ~np.isnan(full_lat).all(axis=(1))
+    full_lat = full_lat[y, :]
+    full_lon = full_lon[y, :]
+    full_alt = full_alt[y, :]
+
+    if normalization:
+        full_lon = 2 * ((full_lon - 0) / (90 - 0)) - 1
+        full_lat = 2 * ((full_lat - 0) / (90 - 0)) - 1
+        full_alt = 2 * ((full_alt - (-13)) / (4294 - 0)) - 1
+    return full_lat, full_lon, full_alt
+
+
+def compute_prob(arr, thresh, mean=True):
+    x = arr.copy()
+    x[x < thresh] = 0
+    x[x >= thresh] = 1
+    if mean:
+        return np.nanmean(x, axis=0)
+    else:
+        return x
+
+
+def remap(x, max_value=1.2, min_value=0.05):
+    return ((x+1)/2)*(max_value-min_value) + min_value
+
+
+def nonparametric_cdf_transform(initial_array, target_array, alpha):
+    # flatten the arrays
+    arrayshape = initial_array.shape
+    target_array = target_array.flatten()
+    initial_array = initial_array.flatten()
+
+    # rank target values
+    order = target_array.argsort()
+    target_ranked = target_array[order]
+
+    # rank initial values order
+    orderin = initial_array.argsort()
+    ranks = np.empty(len(initial_array), int)
+    ranks[orderin] = np.arange(len(initial_array))
+
+    # sort the initial array as well (blended with weight alpha below)
+    orderex = initial_array.argsort()
+    extra_ranked = initial_array[orderex]
+
+    # get ranked values from target and rearrange with the initial order
+    ranked = alpha*extra_ranked + (1-alpha)*target_ranked
+    output_array = ranked[ranks]
+
+    # reshape to the original array dimensions
+    output_array = output_array.reshape(arrayshape)
+    return output_array
+","Python"
+"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Training/SHADECastTraining.py",".py","10684","244","import torch
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+from torch.utils.data import DataLoader
+from torchinfo import summary
+import pytorch_lightning as pl
+from pytorch_lightning import seed_everything
+from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
+from yaml import load, Loader
+
+from utils import save_pkl
+from Dataset.dataset import KIDataset
+from Models.Nowcaster.Nowcast import AFNONowcastNetCascade, Nowcaster, AFNONowcastNet
+from Models.VAE.VariationalAutoEncoder import VAE, Encoder, Decoder
+from Models.UNet.UNet import UNetModel
+from Models.Diffusion.DiffusionModel import LatentDiffusion
+
+
+def get_dataloader(data_path,
+                   coordinate_data_path,
+                   n=12,
+                   min=0.05,
+                   max=1.2,
+                   length=None,
+                   norm_method='rescaling',
+                   num_workers=24,
+                   batch_size=64,
+                   shuffle=True,
+                   validation=False,
+                   return_t=False):
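+    # Wraps KIDataset in a standard PyTorch DataLoader for training/validation.
+    # An illustrative call (a sketch; paths below are placeholders, not taken
+    # from this repo):
+    #   loader = get_dataloader(data_path='/data/TrainingSet/KI/',
+    #                           coordinate_data_path='/data/CoordinateData/',
+    #                           n=12, batch_size=64, shuffle=True)
+    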
dataset = KIDataset(data_path=data_path,
+ n=n,
+ min=min,
+ max=max,
+ length=length,
+ norm_method=norm_method,
+ coordinate_data_path=coordinate_data_path,
+ return_all=False,
+ forecast=True,
+ validation=validation,
+ return_t=return_t)
+ dataloader = DataLoader(dataset,
+ num_workers=num_workers,
+ batch_size=batch_size,
+ shuffle=shuffle)
+ return dataloader
+
+
+def train(config, distributed=True):
+ if distributed:
+ num_nodes = int(os.environ['SLURM_NNODES'])
+ rank = int(os.environ['SLURM_NODEID'])
+ print(rank, num_nodes)
+ else:
+ rank = 0
+ num_nodes = 1
+
+ ID = config['ID']
+ save_pkl(config['Checkpoint']['dirpath'] + ID + '_config.pkl', config)
+ if rank == 0:
+ print(config)
+
+ encoder_config = config['Encoder']
+ encoder = Encoder(in_dim=encoder_config['in_dim'],
+ levels=encoder_config['levels'],
+ min_ch=encoder_config['min_ch'],
+ max_ch=encoder_config['max_ch'])
+ if rank == 0:
+ print('Encoder built')
+
+ decoder_config = config['Decoder']
+ decoder = Decoder(in_dim=decoder_config['in_dim'],
+ levels=decoder_config['levels'],
+ min_ch=decoder_config['min_ch'],
+ max_ch=decoder_config['max_ch'])
+ if rank == 0:
+ print('Decoder built')
+
+ vae_config = config['VAE']
+ vae = VAE.load_from_checkpoint(vae_config['path'],
+ encoder=encoder, decoder=decoder,
+ opt_patience=vae_config['opt_patience'])
+ if rank == 0:
+ print('VAE built')
+
+ nowcaster_config = config['Nowcaster']
+ print(nowcaster_config['path'])
+ # the same architecture is built either way; a checkpoint, if given,
+ # only supplies the weights and freezes the nowcaster
+ nowcast_net = AFNONowcastNet(vae,
+ train_autoenc=False,
+ embed_dim=nowcaster_config['embed_dim'],
+ embed_dim_out=nowcaster_config['embed_dim'],
+ analysis_depth=nowcaster_config['analysis_depth'],
+ forecast_depth=nowcaster_config['forecast_depth'],
+ input_steps=nowcaster_config['input_steps'],
+ output_steps=nowcaster_config['output_steps'])
+ if nowcaster_config['path'] is None:
+ train_nowcast = True
+ else:
+ nowcaster = Nowcaster.load_from_checkpoint(nowcaster_config['path'], nowcast_net=nowcast_net,
+ opt_patience=nowcaster_config['opt_patience'],
+ loss_type=nowcaster_config['loss_type'])
+ nowcast_net = nowcaster.nowcast_net
+ train_nowcast = False
+
+ print('Nowcaster built, pretrained checkpoint:', nowcaster_config['path'])
+ cascade_net = AFNONowcastNetCascade(nowcast_net=nowcast_net,
+ cascade_depth=nowcaster_config['cascade_depth'],
+ train_net=train_nowcast)
+ if rank == 0:
+ summary(nowcast_net)
+ if rank == 0:
+ print('Nowcaster built')
+
+ diffusion_config = config['Diffusion']
+ denoiser = UNetModel(
+ in_channels=vae.hidden_width,
+ model_channels=diffusion_config['model_channels'],
+ out_channels=vae.hidden_width,
+ num_res_blocks=diffusion_config['num_res_blocks'],
+ attention_resolutions=diffusion_config['attention_resolutions'],
+ dims=diffusion_config['dims'],
+ channel_mult=diffusion_config['channel_mult'],
+ num_heads=8,
+ num_timesteps=2,
+ 
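# NOTE: the next argument, context_ch, ties each attention level of the
+ # denoiser to the channel width of the matching level of the nowcaster's
+ # latent cascade (see AFNONowcastNetCascade.cascade_dims)
+ 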
context_ch=cascade_net.cascade_dims) + + ldm = LatentDiffusion(model=denoiser, + autoencoder=vae, + context_encoder=cascade_net, + beta_schedule=diffusion_config['scheduler'], + loss_type=""l2"", + use_ema=diffusion_config['use_ema'], + lr_warmup=0, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + parameterization='eps', + lr=diffusion_config['lr'], + timesteps=diffusion_config['noise_steps'], + opt_patience=diffusion_config['opt_patience'], + get_t=config['Dataset']['get_t'], + ) + if rank == 0: + print('All models built') + summary(ldm) + + ckpt_config = config['Checkpoint'] + checkpoint_callback = ModelCheckpoint( + monitor=ckpt_config['monitor'], + dirpath=ckpt_config['dirpath'], + filename=ID + '_' + ckpt_config['filename'], + save_top_k=ckpt_config['save_top_k'], + every_n_epochs=ckpt_config['every_n_epochs'] + ) + + early_stop_callback = EarlyStopping(monitor=ckpt_config['monitor'], + patience=config['EarlyStopping']['patience']) + + tr_config = config['Trainer'] + trainer = pl.Trainer( + default_root_dir=ckpt_config['dirpath'], + accelerator=tr_config['accelerator'], + devices=tr_config['devices'], + num_nodes=num_nodes, + max_epochs=tr_config['max_epochs'], + callbacks=[checkpoint_callback, early_stop_callback], + strategy=tr_config['strategy'], + precision=tr_config['precision'], + enable_progress_bar=(rank == 0), + deterministic=False, + accumulate_grad_batches=tr_config['accumulate_grad_batches'] + ) + if rank == 0: + print('Trainer built') + data_config = config['Dataset'] + train_dataloader = get_dataloader(data_path=data_config['data_path'] + 'TrainingSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_in'] + data_config['n_out'], + min=data_config['min'], + max=data_config['max'], + length=data_config['train_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=True, + validation=False) + + val_dataloader = get_dataloader(data_path=data_config['data_path'] + 'ValidationSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_in'] + data_config['n_out'], + min=data_config['min'], + max=data_config['max'], + length=data_config['val_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=False, + validation=True, + return_t=data_config['get_t']) + if rank == 0: + print('Training started') + resume_training = tr_config['resume_training'] + torch.cuda.empty_cache() + if resume_training is None: + trainer.fit(ldm, train_dataloader, val_dataloader) + else: + # if tr_config['resume_training'] is False: + trainer.fit(ldm, train_dataloader, val_dataloader, + ckpt_path=resume_training) + # else: + + + +if __name__ == '__main__': + with open('SHADECastTrainingconf.yml', + 'r') as o: + config = load(o, Loader) + seed = config['seed'] + if seed is not None: + seed_everything(int(seed), workers=True) + + train(config) +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Training/Nowcast_training/NowcasterTraining_pl.py",".py","7230","178","import os +from torchinfo import summary +import pytorch_lightning as pl +from torch.utils.data import DataLoader +from yaml import load, Loader +from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping +# from pytorch_lightning import seed_everything +from Models.VAE.VariationalAutoEncoder import Encoder, Decoder, VAE +from 
Models.Nowcaster.Nowcast import AFNONowcastNet, Nowcaster +from Dataset.dataset import KIDataset +from utils import save_pkl + + +def get_dataloader(data_path, + coordinate_data_path, + n=12, + min=0.05, + max=1.2, + length=None, + norm_method='rescaling', + num_workers=24, + batch_size=64, + shuffle=True, + validation=False): + dataset = KIDataset(data_path=data_path, + n=n, + min=min, + max=max, + length=length, + norm_method=norm_method, + coordinate_data_path=coordinate_data_path, + return_all=False, + forecast=True, + validation=validation) + dataloader = DataLoader(dataset, + num_workers=num_workers, + batch_size=batch_size, + shuffle=shuffle) + return dataloader + + + +def train(config, distributed=True): + if distributed: + num_nodes = int(os.environ['SLURM_NNODES']) + rank = int(os.environ['SLURM_NODEID']) + print(rank, num_nodes) + else: + rank = 0 + num_nodes = 1 + + ID = config['ID'] + save_pkl(config['Checkpoint']['dirpath'] + ID + '_config.pkl', config) + if rank == 0: + print(config) + + encoder_config = config['Encoder'] + encoder = Encoder(in_dim=encoder_config['in_dim'], + levels=encoder_config['levels'], + min_ch=encoder_config['min_ch'], + max_ch=encoder_config['max_ch']) + if rank == 0: + print('Encoder built') + + decoder_config = config['Decoder'] + decoder = Decoder(in_dim=decoder_config['in_dim'], + levels=decoder_config['levels'], + min_ch=decoder_config['min_ch'], + max_ch=decoder_config['max_ch']) + if rank == 0: + print('Decoder built') + + vae_config = config['VAE'] + if vae_config['path'] is not None: + vae = VAE.load_from_checkpoint(vae_config['path'], + encoder=encoder, decoder=decoder) + train_autoencoder = False + else: + vae = VAE(encoder, + decoder, + kl_weight=vae_config['kl_weight'], + encoded_channels=encoder_config['max_ch'], + hidden_width=vae_config['hidden_width']) + train_autoencoder = True + if rank == 0: + print('VAE built') + + nowcaster_config = config['Nowcaster'] + if rank == 0: + print(nowcaster_config) + + nowcast_net = AFNONowcastNet(vae, + train_autoenc=train_autoencoder, + embed_dim=nowcaster_config['embed_dim'], + embed_dim_out=nowcaster_config['embed_dim'], + analysis_depth=nowcaster_config['analysis_depth'], + forecast_depth=nowcaster_config['forecast_depth'], + input_steps=nowcaster_config['input_steps'], + output_steps=nowcaster_config['output_steps']) + nowcaster = Nowcaster(nowcast_net=nowcast_net, + opt_patience=nowcaster_config['opt_patience'], + loss_type=nowcaster_config['loss_type']) + + if rank == 0: + print('All models built') + summary(nowcaster) + + ckpt_config = config['Checkpoint'] + checkpoint_callback = ModelCheckpoint( + monitor=ckpt_config['monitor'], + dirpath=ckpt_config['dirpath'], + filename=ID + '_' + ckpt_config['filename'], + save_top_k=ckpt_config['save_top_k'], + every_n_epochs=ckpt_config['every_n_epochs'] + ) + + early_stop_callback = EarlyStopping(monitor=ckpt_config['monitor'], + patience=config['EarlyStopping']['patience']) + + tr_config = config['Trainer'] + trainer = pl.Trainer( + default_root_dir=ckpt_config['dirpath'], + accelerator=tr_config['accelerator'], + devices=tr_config['devices'], + num_nodes=num_nodes, + max_epochs=tr_config['max_epochs'], + callbacks=[checkpoint_callback, early_stop_callback], + strategy=tr_config['strategy'], + precision=tr_config['precision'], + enable_progress_bar=(rank == 0), + accumulate_grad_batches=tr_config['accumulate_grad_batches'] + # deterministic=False + ) + if rank == 0: + print('Trainer built') + data_config = config['Dataset'] + train_dataloader = 
get_dataloader(data_path=data_config['data_path'] + 'TrainingSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_in'] + data_config['n_out'], + min=data_config['min'], + max=data_config['max'], + length=data_config['train_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=True, + validation=False) + + val_dataloader = get_dataloader(data_path=data_config['data_path'] + 'ValidationSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_in'] + data_config['n_out'], + min=data_config['min'], + max=data_config['max'], + length=data_config['val_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=False, + validation=True) + if rank == 0: + print('Training started') + resume_training = tr_config['resume_training'] + if resume_training is None: + trainer.fit(nowcaster, train_dataloader, val_dataloader) + else: + trainer.fit(nowcaster, train_dataloader, val_dataloader, + ckpt_path=resume_training) + + +if __name__ == '__main__': + with open('Training/Nowcast_training/Nowcastertrainingconf.yml', + 'r') as o: + config = load(o, Loader) + + # seed_everything(0, workers=0) + + train(config) +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Training/Nowcast_training/IrradianceNetTraining_pl.py",".py","5644","140","import os +from torchinfo import summary +import pytorch_lightning as pl +from torch.utils.data import DataLoader +from yaml import load, Loader +from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping +# from pytorch_lightning import seed_everything +# from Models.VAE.VariationalAutoEncoder import Encoder, Decoder, VAE +# from Models.Nowcaster.Nowcast import AFNONowcastNet, Nowcaster +from Benchmark.IrradianceNet import ConvLSTM_patch, IrradianceNet +from Dataset.dataset import KIDataset +from utils import save_pkl + + +def get_dataloader(data_path, + coordinate_data_path, + n=12, + min=0.05, + max=1.2, + length=100, + norm_method='rescaling', + num_workers=24, + batch_size=64, + shuffle=True, + validation=False): + dataset = KIDataset(data_path=data_path, + n=n, + min=min, + max=max, + length=length, + norm_method=norm_method, + coordinate_data_path=coordinate_data_path, + return_all=False, + forecast=True, + validation=validation) + dataloader = DataLoader(dataset, + num_workers=num_workers, + batch_size=batch_size, + shuffle=shuffle) + return dataloader + + +def train(config, distributed=True): + if distributed: + num_nodes = int(os.environ['SLURM_NNODES']) + rank = int(os.environ['SLURM_NODEID']) + print(rank, num_nodes) + else: + rank = 0 + num_nodes = 1 + + ID = config['ID'] + save_pkl(config['Checkpoint']['dirpath'] + ID + '_config.pkl', config) + if rank == 0: + print(config) + + + nowcaster_config = config['Nowcaster'] + if rank == 0: + print(nowcaster_config) + + nowcast_net = ConvLSTM_patch(in_chan=1, image_size=128, device='cuda', seq_len=8) + irradiance_net = IrradianceNet(nowcast_net, + opt_patience=nowcaster_config['opt_patience']) + + if rank == 0: + print('All models built') + summary(irradiance_net) + + ckpt_config = config['Checkpoint'] + checkpoint_callback = ModelCheckpoint( + monitor=ckpt_config['monitor'], + dirpath=ckpt_config['dirpath'], + filename=ID + '_' + ckpt_config['filename'], + save_top_k=ckpt_config['save_top_k'], + 
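# keeps the best `save_top_k` checkpoints ranked by the monitored metric,
+ # checked every `every_n_epochs` epochs (the same metric also drives the
+ # early stopping callback below)
+ 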
every_n_epochs=ckpt_config['every_n_epochs'] + ) + + early_stop_callback = EarlyStopping(monitor=ckpt_config['monitor'], + patience=config['EarlyStopping']['patience']) + + tr_config = config['Trainer'] + trainer = pl.Trainer( + default_root_dir=ckpt_config['dirpath'], + accelerator=tr_config['accelerator'], + devices=tr_config['devices'], + num_nodes=num_nodes, + max_epochs=tr_config['max_epochs'], + callbacks=[checkpoint_callback, early_stop_callback], + strategy=tr_config['strategy'], + precision=tr_config['precision'], + enable_progress_bar=(rank == 0), + accumulate_grad_batches=tr_config['accumulate_grad_batches'] + # deterministic=False + ) + if rank == 0: + print('Trainer built') + data_config = config['Dataset'] + train_dataloader = get_dataloader(data_path=data_config['data_path'] + 'TrainingSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_in']+data_config['n_out'], + min=data_config['min'], + max=data_config['max'], + length=data_config['train_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=True, + validation=False) + + val_dataloader = get_dataloader(data_path=data_config['data_path'] + 'ValidationSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_in']+data_config['n_out'], + min=data_config['min'], + max=data_config['max'], + length=data_config['val_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=False, + validation=True) + if rank == 0: + print('Training started') + resume_training = tr_config['resume_training'] + if resume_training is None: + trainer.fit(irradiance_net, train_dataloader, val_dataloader) + else: + trainer.fit(irradiance_net, train_dataloader, val_dataloader, + ckpt_path=resume_training) + + +if __name__ == '__main__': + with open('/scratch/snx3000/acarpent/GenerativeNowcasting/SHADECast/Training/Nowcast_training/IrradianceNettrainingconf.yml', + 'r') as o: + config = load(o, Loader) + + # seed_everything(0, workers=0) + + train(config) +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Training/VAE_training/VAETraining_pl.py",".py","5531","139","import os +from torchinfo import summary +import pytorch_lightning as pl +from torch.utils.data import DataLoader +from yaml import load, Loader +from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping +# from pytorch_lightning import seed_everything +from Models.VAE.VariationalAutoEncoder import Encoder, Decoder, VAE +from Dataset.dataset import KIDataset +from utils import save_pkl + + +def get_dataloader(data_path, + coordinate_data_path, + n=12, + min=0.05, + max=1.2, + length=None, + norm_method='rescaling', + num_workers=24, + batch_size=64, + shuffle=True): + dataset = KIDataset(data_path=data_path, + n=n, + min=min, + max=max, + length=length, + norm_method=norm_method, + coordinate_data_path=coordinate_data_path, + return_all=False) + dataloader = DataLoader(dataset, + num_workers=num_workers, + batch_size=batch_size, + shuffle=shuffle) + return dataloader + + +def train(config): + num_nodes = int(os.environ['SLURM_NNODES']) + rank = int(os.environ['SLURM_NODEID']) + print(rank, num_nodes) + + ID = config['ID'] + save_pkl(config['Checkpoint']['dirpath'] + ID + '_config.pkl', config), + print(config) + + encoder_config = config['Encoder'] + encoder = 
Encoder(in_dim=encoder_config['in_dim'], + levels=encoder_config['levels'], + min_ch=encoder_config['min_ch'], + max_ch=encoder_config['max_ch']) + print('Encoder built') + + decoder_config = config['Decoder'] + decoder = Decoder(in_dim=decoder_config['in_dim'], + levels=decoder_config['levels'], + min_ch=decoder_config['min_ch'], + max_ch=decoder_config['max_ch']) + print('Decoder built') + + vae_config = config['VAE'] + vae = VAE(encoder, + decoder, + kl_weight=vae_config['kl_weight'], + encoded_channels=encoder_config['max_ch'], + hidden_width=vae_config['hidden_width'], + opt_patience=vae_config['opt_patience']) + print('All models built') + + batch_size = config['Dataset']['batch_size'] + n_steps = config['Dataset']['n_steps'] + if rank == 0: + summary(vae, input_size=(batch_size, 1, n_steps, 128, 128)) + + ckpt_config = config['Checkpoint'] + checkpoint_callback = ModelCheckpoint( + monitor=ckpt_config['monitor'], + dirpath=ckpt_config['dirpath'], + filename=ID + '_' + ckpt_config['filename'], + save_top_k=ckpt_config['save_top_k'], + every_n_epochs=ckpt_config['every_n_epochs'] + ) + + early_stop_callback = EarlyStopping(monitor=ckpt_config['monitor'], + patience=config['EarlyStopping']['patience']) + + tr_config = config['Trainer'] + trainer = pl.Trainer( + default_root_dir=ckpt_config['dirpath'], + accelerator=tr_config['accelerator'], + devices=tr_config['devices'], + num_nodes=num_nodes, + max_epochs=tr_config['max_epochs'], + callbacks=[checkpoint_callback, early_stop_callback], + strategy=tr_config['strategy'], + precision=tr_config['precision'], + enable_progress_bar=(rank == 0), + deterministic=True + ) + + data_config = config['Dataset'] + train_dataloader = get_dataloader(data_path=data_config['data_path'] + 'TrainingSet/KI/', + coordinate_data_path=data_config['data_path']+'CoordinateData/', + n=data_config['n_steps'], + min=data_config['min'], + max=data_config['max'], + length=data_config['train_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=True) + + val_dataloader = get_dataloader(data_path=data_config['data_path'] + 'ValidationSet/KI/', + coordinate_data_path=data_config['data_path'] + 'CoordinateData/', + n=data_config['n_steps'], + min=data_config['min'], + max=data_config['max'], + length=data_config['val_length'], + num_workers=data_config['num_workers'], + norm_method=data_config['norm_method'], + batch_size=data_config['batch_size'], + shuffle=False) + print('Training started') + + resume_training = tr_config['resume_training'] + if resume_training is None: + trainer.fit(vae, train_dataloader, val_dataloader) + else: + trainer.fit(vae, train_dataloader, val_dataloader, + ckpt_path=resume_training) + + +if __name__ == '__main__': + with open('VAEtrainingconf.yml', 'r') as o: + config = load(o, Loader) + + # seed_everything(0, workers=0) + train(config) +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/UNet/utils.py",".py","14823","389","import torch +from torch import nn +from torch.nn import functional as F +import math + + +class PositionalEncoding(nn.Module): + def __init__( + self, + embedding_dim: tuple, + dropout: float = 0.1, + max_len: int = 1000, + apply_dropout: bool = False, + ): + """"""Section 3.5 of attention is all you need paper. + + Extended slicing method is used to fill even and odd position of sin, cos with increment of 2. + Ex, `[sin, cos, sin, cos, sin, cos]` for `embedding_dim = 6`. 
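+
+ Shape sketch (illustrative values):
+
+ >>> pe = PositionalEncoding(embedding_dim=128, max_len=1000)
+ >>> pe(torch.tensor([3, 7])).shape
+ torch.Size([2, 128])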
+
+ `max_len` is equivalent to number of noise steps or patches. `embedding_dim` must be the same as the image
+ embedding dimension of the model.
+
+ Args:
+ embedding_dim: `d_model` in given positional encoding formula.
+ dropout: Dropout amount.
+ max_len: Number of embeddings to generate. Here, equivalent to total noise steps.
+ """"""
+ super(PositionalEncoding, self).__init__()
+ self.dropout = nn.Dropout(p=dropout)
+ self.apply_dropout = apply_dropout
+
+ pos_encoding = torch.zeros(max_len, embedding_dim)
+ position = torch.arange(start=0, end=max_len).unsqueeze(1)
+ div_term = torch.exp(-math.log(10000.0) * torch.arange(0, embedding_dim, 2).float() / embedding_dim)
+
+ pos_encoding[:, 0::2] = torch.sin(position * div_term)
+ pos_encoding[:, 1::2] = torch.cos(position * div_term)
+ self.register_buffer(name='pos_encoding', tensor=pos_encoding, persistent=False)
+
+ def forward(self, t: torch.Tensor) -> torch.Tensor:
+ """"""Get the precalculated positional embedding at timestep t. Outputs the same as the video implementation
+ code, but embeddings are in [sin, cos, sin, cos] format instead of the [sin, sin, cos, cos] used in that code.
+ Also, a batch dimension is added to the final output.
+ """"""
+ positional_encoding = self.pos_encoding[t].squeeze(1)
+ if self.apply_dropout:
+ return self.dropout(positional_encoding)
+ return positional_encoding
+
+
+class DoubleConv(nn.Module):
+ def __init__(
+ self,
+ in_channels: int,
+ out_channels: int,
+ mid_channels: int = None,
+ residual: bool = False
+ ):
+ """"""Double convolutions as applied in the unet paper architecture.
+ """"""
+ super(DoubleConv, self).__init__()
+ self.residual = residual
+ if not mid_channels:
+ mid_channels = out_channels
+
+ self.double_conv = nn.Sequential(
+ nn.Conv3d(
+ in_channels=in_channels, out_channels=mid_channels, kernel_size=(1, 3, 3), padding=(0, 1, 1), bias=False
+ ),
+ nn.GroupNorm(num_groups=1, num_channels=mid_channels),
+ nn.GELU(),
+ nn.Conv3d(
+ in_channels=mid_channels, out_channels=out_channels, kernel_size=(1, 3, 3), padding=(0, 1, 1),
+ bias=False,
+ ),
+ nn.GroupNorm(num_groups=1, num_channels=out_channels),
+ )
+
+ def forward(self, x: torch.Tensor) -> torch.Tensor:
+ if self.residual:
+ return F.gelu(x + self.double_conv(x))
+
+ return self.double_conv(x)
+
+
+class Down(nn.Module):
+ def __init__(self, in_channels: int, out_channels: int, emb_dim: int = 256):
+ super(Down, self).__init__()
+ self.maxpool_conv = nn.Sequential(
+ nn.MaxPool3d(kernel_size=(1, 2, 2)),
+ DoubleConv(in_channels=in_channels, out_channels=in_channels, residual=True),
+ DoubleConv(in_channels=in_channels, out_channels=out_channels),
+ )
+
+ self.emb_layer = nn.Sequential(
+ nn.SiLU(),
+ nn.Linear(in_features=emb_dim, out_features=out_channels),
+ )
+
+ def forward(self, x: torch.Tensor, t_embedding: torch.Tensor) -> torch.Tensor:
+ """"""Downsamples the input tensor, computes the timestep embedding and adds it channel-wise.
+
+ If `x.shape == [4, 64, 64, 64]` and `out_channels = 128`, then max_conv outputs [4, 128, 32, 32] by
+ downsampling in h, w and outputting the specified number of feature maps/channels.
+
+ `t_embedding` is the embedding of the timestep, of shape [batch, time_dim]. It is passed through the embedding
+ layer to produce a channel dimension equal to that of the x tensor, so the two can be summed elementwise.
+
+ Since the emb_layer output needs to be summed with x, its shape is `emb.shape == [4, 128]`. It needs to be converted
+ to a 4D tensor, [4, 128, 1, 1].
The embedding is then duplicated across all of the `H x W` positions to get
+ a shape of [4, 128, 32, 32], i.e. the same 128-D vector is added at every pixel position in the image. Finally, the
+ emb_layer output is summed with the max_conv output.
+ """"""
+ x = self.maxpool_conv(x)
+ emb = self.emb_layer(t_embedding)
+ emb = emb.view(emb.shape[0], emb.shape[1], 1, 1, 1).repeat(1, 1, x.shape[-3], x.shape[-2], x.shape[-1])
+ return x + emb
+
+
+class Up(nn.Module):
+ def __init__(self, in_channels: int, out_channels: int, emb_dim: int = 256):
+ super(Up, self).__init__()
+ self.up = nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear', align_corners=True)
+ self.conv = nn.Sequential(
+ DoubleConv(in_channels=in_channels, out_channels=in_channels, residual=True),
+ DoubleConv(in_channels=in_channels, out_channels=out_channels, mid_channels=in_channels // 2),
+ )
+
+ self.emb_layer = nn.Sequential(
+ nn.SiLU(),
+ nn.Linear(in_features=emb_dim, out_features=out_channels),
+ )
+
+ def forward(self, x: torch.Tensor, x_skip: torch.Tensor, t_embedding: torch.Tensor) -> torch.Tensor:
+ x = self.up(x)
+ x = torch.cat([x_skip, x], dim=1)
+ x = self.conv(x)
+ emb = self.emb_layer(t_embedding)
+ emb = emb.view(emb.shape[0], emb.shape[1], 1, 1, 1).repeat(1, 1, x.shape[-3], x.shape[-2], x.shape[-1])
+ return x + emb
+
+
+# adopted from
+# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
+# and
+# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+# and
+# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
+#
+# thanks!
+
+# numpy and einops are needed by the helpers below; math, torch and nn are
+# already imported at the top of this module
+import numpy as np
+from einops import repeat
+
+
+def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+ if schedule == ""linear"":
+ betas = (
+ torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
+ )
+
+ elif schedule == ""cosine"":
+ timesteps = (
+ torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
+ )
+ alphas = timesteps / (1 + cosine_s) * np.pi / 2
+ alphas = torch.cos(alphas).pow(2)
+ alphas = alphas / alphas[0]
+ betas = 1 - alphas[1:] / alphas[:-1]
+ betas = np.clip(betas, a_min=0, a_max=0.999)
+
+ elif schedule == ""sqrt_linear"":
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
+ elif schedule == ""sqrt"":
+ betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
+ else:
+ raise ValueError(f""schedule '{schedule}' unknown."")
+ return betas.numpy()
+
+
+def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
+ if ddim_discr_method == 'uniform':
+ c = num_ddpm_timesteps // num_ddim_timesteps
+ ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
+ elif ddim_discr_method == 'quad':
+ ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
+ else:
+ raise NotImplementedError(f'There is no ddim discretization method called ""{ddim_discr_method}""')
+
+ # assert ddim_timesteps.shape[0] == num_ddim_timesteps
+ # add one to get the final alpha values right (the ones from first scale to data during sampling)
+ steps_out = ddim_timesteps + 1
+ if verbose:
+ print(f'Selected timesteps for ddim sampler: {steps_out}')
+ return steps_out
+
+
+def 
make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
+ # select alphas for computing the variance schedule
+ alphas = alphacums[ddim_timesteps]
+ alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
+
+ # according to the formula provided in https://arxiv.org/abs/2010.02502
+ sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
+ if verbose:
+ print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+ print(f'For the chosen value of eta, which is {eta}, '
+ f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+ return sigmas, alphas, alphas_prev
+
+
+def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+ """"""
+ Create a beta schedule that discretizes the given alpha_t_bar function,
+ which defines the cumulative product of (1-beta) over time from t = [0,1].
+ :param num_diffusion_timesteps: the number of betas to produce.
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and
+ produces the cumulative product of (1-beta) up to that
+ part of the diffusion process.
+ :param max_beta: the maximum beta to use; use values lower than 1 to
+ prevent singularities.
+ """"""
+ betas = []
+ for i in range(num_diffusion_timesteps):
+ t1 = i / num_diffusion_timesteps
+ t2 = (i + 1) / num_diffusion_timesteps
+ betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
+ return np.array(betas)
+
+
+def extract_into_tensor(a, t, x_shape):
+ b, *_ = t.shape
+ out = a.gather(-1, t)
+ return out.reshape(b, *((1,) * (len(x_shape) - 1)))
+
+
+def checkpoint(func, inputs, params, flag):
+ """"""
+ Evaluate a function without caching intermediate activations, allowing for
+ reduced memory at the expense of extra compute in the backward pass.
+ :param func: the function to evaluate.
+ :param inputs: the argument sequence to pass to `func`.
+ :param params: a sequence of parameters `func` depends on but does not
+ explicitly take as arguments.
+ :param flag: if False, disable gradient checkpointing.
+ """"""
+ if flag:
+ args = tuple(inputs) + tuple(params)
+ return CheckpointFunction.apply(func, len(inputs), *args)
+ else:
+ return func(*inputs)
+
+
+class CheckpointFunction(torch.autograd.Function):
+ @staticmethod
+ def forward(ctx, run_function, length, *args):
+ ctx.run_function = run_function
+ ctx.input_tensors = list(args[:length])
+ ctx.input_params = list(args[length:])
+
+ with torch.no_grad():
+ output_tensors = ctx.run_function(*ctx.input_tensors)
+ return output_tensors
+
+ @staticmethod
+ def backward(ctx, *output_grads):
+ ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors]
+ with torch.enable_grad():
+ # Fixes a bug where the first op in run_function modifies the
+ # Tensor storage in place, which is not allowed for detach()'d
+ # Tensors.
+ shallow_copies = [x.view_as(x) for x in ctx.input_tensors]
+ output_tensors = ctx.run_function(*shallow_copies)
+ input_grads = torch.autograd.grad(
+ output_tensors,
+ ctx.input_tensors + ctx.input_params,
+ output_grads,
+ allow_unused=True,
+ )
+ del ctx.input_tensors
+ del ctx.input_params
+ del output_tensors
+ return (None, None) + input_grads
+
+
+def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):
+ """"""
+ Create sinusoidal timestep embeddings.
+ :param timesteps: a 1-D Tensor of N indices, one per batch element.
+ These may be fractional.
+ :param dim: the dimension of the output.
+ :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """""" + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """""" + Zero out the parameters of a module and return it. + """""" + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """""" + Scale the parameters of a module and return it. + """""" + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """""" + Take the mean over all non-batch dimensions. + """""" + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + + +def normalization(channels): + """""" + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. + """""" + return nn.Identity() #GroupNorm32(32, channels) + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() + + +def conv_nd(dims, *args, **kwargs): + """""" + Create a 1D, 2D, or 3D convolution module. + """""" + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f""unsupported dimensions: {dims}"") + + +def linear(*args, **kwargs): + """""" + Create a linear module. + """""" + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """""" + Create a 1D, 2D, or 3D average pooling module. + """""" + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f""unsupported dimensions: {dims}"") +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/UNet/UNet.py",".py","17727","493",""""""" +From https://github.com/MeteoSwiss/ldcast/blob/master/ldcast/models/genforecast/unet.py + +"""""" + +from abc import abstractmethod +from functools import partial +import math +from typing import Iterable + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from SHADECast.Models.UNet.utils import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from SHADECast.Blocks.AFNO import AFNOCrossAttentionBlock3d +SpatialTransformer = type(None) + + +class TimestepBlock(nn.Module): + """""" + Any module where forward() takes timestep embeddings as a second argument. + """""" + + @abstractmethod + def forward(self, x, emb): + """""" + Apply the module to `x` given `emb` timestep embeddings. 
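+
+ A minimal concrete sketch (illustrative only; assumes `emb` has shape
+ [N, C] matching the channels of `x`):
+
+ class AddEmb(TimestepBlock):
+ def forward(self, x, emb):
+ # broadcast the embedding over (T, H, W) and add it channel-wise
+ return x + emb[..., None, None, None]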
+ """""" + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """""" + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """""" + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, AFNOCrossAttentionBlock3d): + img_shape = tuple(x.shape[-2:]) + x = layer(x, context[img_shape]) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """""" + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """""" + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode=""nearest"" + ) + else: + x = F.interpolate(x, scale_factor=2, mode=""nearest"") + if self.use_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + """""" + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """""" + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """""" + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. 
+ """""" + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """""" + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """""" + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class UNetModel(nn.Module): + """""" + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. 
+ :param num_head_channels: if specified, ignore num_heads and instead use
+ a fixed channel width per attention head.
+ :param num_heads_upsample: works with num_heads to set a different number
+ of heads for upsampling. Deprecated.
+ :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.
+ :param resblock_updown: use residual blocks for up/downsampling.
+
+ """"""
+
+ def __init__(
+ self,
+ model_channels,
+ in_channels=1,
+ out_channels=1,
+ num_res_blocks=2,
+ attention_resolutions=(1, 2, 4),
+ context_ch=128,
+ dropout=0,
+ channel_mult=(1, 2, 4, 4),
+ conv_resample=True,
+ dims=3,
+ use_checkpoint=False,
+ use_fp16=False,
+ num_heads=-1,
+ num_head_channels=-1,
+ num_heads_upsample=-1,
+ use_scale_shift_norm=False,
+ resblock_updown=False,
+ legacy=True,
+ num_timesteps=1
+ ):
+ super().__init__()
+
+ if num_heads_upsample == -1:
+ num_heads_upsample = num_heads
+
+ if num_heads == -1:
+ assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
+
+ if num_head_channels == -1:
+ assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
+
+ self.in_channels = in_channels
+ self.model_channels = model_channels
+ self.out_channels = out_channels
+ self.num_res_blocks = num_res_blocks
+ self.attention_resolutions = attention_resolutions
+ self.dropout = dropout
+ self.channel_mult = channel_mult
+ self.conv_resample = conv_resample
+ self.use_checkpoint = use_checkpoint
+ self.dtype = th.float16 if use_fp16 else th.float32
+ self.num_heads = num_heads
+ self.num_head_channels = num_head_channels
+ self.num_heads_upsample = num_heads_upsample
+ timesteps = th.arange(1, num_timesteps+1)
+
+ time_embed_dim = model_channels * 4
+ self.time_embed = nn.Sequential(
+ linear(model_channels, time_embed_dim),
+ nn.SiLU(),
+ linear(time_embed_dim, time_embed_dim),
+ )
+
+ self.input_blocks = nn.ModuleList(
+ [
+ TimestepEmbedSequential(
+ conv_nd(dims, in_channels, model_channels, 3, padding=1)
+ )
+ ]
+ )
+ self._feature_size = model_channels
+ input_block_chans = [model_channels]
+ ch = model_channels
+ ds = 1
+ for level, mult in enumerate(channel_mult):
+ for _ in range(num_res_blocks):
+ layers = [
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=mult * model_channels,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ )
+ ]
+ ch = mult * model_channels
+ if ds in attention_resolutions:
+ if num_head_channels == -1:
+ dim_head = ch // num_heads
+ else:
+ num_heads = ch // num_head_channels
+ dim_head = num_head_channels
+ if legacy:
+ dim_head = num_head_channels
+ layers.append(
+ AFNOCrossAttentionBlock3d(
+ ch, context_dim=context_ch[level], num_blocks=num_heads,
+ data_format=""channels_first"", timesteps=timesteps
+ )
+ )
+ self.input_blocks.append(TimestepEmbedSequential(*layers))
+ self._feature_size += ch
+ input_block_chans.append(ch)
+ if level != len(channel_mult) - 1:
+ out_ch = ch
+ self.input_blocks.append(
+ TimestepEmbedSequential(
+ ResBlock(
+ ch,
+ time_embed_dim,
+ dropout,
+ out_channels=out_ch,
+ dims=dims,
+ use_checkpoint=use_checkpoint,
+ use_scale_shift_norm=use_scale_shift_norm,
+ down=True,
+ )
+ if resblock_updown
+ else Downsample(
+ ch, conv_resample, dims=dims, out_channels=out_ch
+ )
+ )
+ )
+ ch = out_ch
+ input_block_chans.append(ch)
+ ds *= 2
+ self._feature_size += ch
+
+ if num_head_channels == -1:
+ dim_head = ch // num_heads
+ else:
+ num_heads = ch // num_head_channels
+ dim_head = num_head_channels
+ if legacy:
+ dim_head = 
num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AFNOCrossAttentionBlock3d( + ch, context_dim=context_ch[-1], num_blocks=num_heads, + data_format=""channels_first"", timesteps=timesteps + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(num_res_blocks + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = num_head_channels + layers.append( + AFNOCrossAttentionBlock3d( + ch, context_dim=context_ch[level], num_blocks=num_heads, + data_format=""channels_first"", timesteps=timesteps + ) + ) + if level and i == num_res_blocks: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + + def forward(self, x, timesteps=None, context=None): + """""" + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :return: an [N x C x ...] Tensor of outputs. 
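+
+ Note on the conditioning contract (as implemented above): `context` is a
+ dict mapping spatial-shape tuples (H, W) to conditioning tensors, one per
+ cascade level, as produced by AFNONowcastNetCascade.forward;
+ TimestepEmbedSequential selects the tensor for each attention block via
+ tuple(x.shape[-2:]).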
+ """""" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + return self.out(h) +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/Nowcaster/Nowcast.py",".py","8968","270","import collections +import torch +from torch import nn +from torch.nn import functional as F +import pytorch_lightning as pl + +from SHADECast.Blocks.attention import TemporalTransformer, positional_encoding +from SHADECast.Blocks.AFNO import AFNOBlock3d +from SHADECast.Blocks.ResBlock3D import ResBlock3D +import numpy as np + + +class Nowcaster(pl.LightningModule): + def __init__(self, nowcast_net, opt_patience, loss_type='l1'): + super().__init__() + self.nowcast_net = nowcast_net + self.opt_patience = opt_patience + self.loss_type = loss_type + + def forward(self, x): + return self.nowcast_net(x) + + def _loss(self, batch): + x, y = batch + + if self.loss_type == 'l1': + y_pred = self.forward(x) + return (y - y_pred).abs().mean() + + elif self.loss_type == 'l2': + y_pred = self.forward(x) + return (y - y_pred).square().mean() + + elif self.loss_type == 'latent': + y, _ = self.nowcast_net.autoencoder.encode(y) + x = self.nowcast_net.latent_forward(x) + y_pred = self.nowcast_net.out_proj(x) + return (y - y_pred).abs().mean() + else: + AssertionError('Loss type must be ""l1"" or ""l2""') + + def training_step(self, batch, batch_idx): + loss = self._loss(batch) + log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True} + self.log('train_loss', loss, **log_params) + return loss + + @torch.no_grad() + def val_test_step(self, batch, batch_idx, split=""val""): + loss = self._loss(batch) + log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True} + self.log(f""{split}_loss"", loss, **log_params) + + def validation_step(self, batch, batch_idx): + self.val_test_step(batch, batch_idx, split=""val"") + + def test_step(self, batch, batch_idx): + self.val_test_step(batch, batch_idx, split=""test"") + + def configure_optimizers(self): + optimizer = torch.optim.AdamW( + self.parameters(), lr=1e-3, + betas=(0.5, 0.9), weight_decay=1e-3 + ) + reduce_lr = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, patience=self.opt_patience, factor=0.25, verbose=True + ) + + optimizer_spec = { + ""optimizer"": optimizer, + ""lr_scheduler"": { + ""scheduler"": reduce_lr, + ""monitor"": ""val_loss"", + ""frequency"": 1, + }, + } + return optimizer_spec + + +class FusionBlock3d(nn.Module): + def __init__(self, dim, size_ratios, dim_out=None, afno_fusion=False): + super().__init__() + + N_sources = len(size_ratios) + if not isinstance(dim, collections.abc.Sequence): + dim = (dim,) * N_sources + if dim_out is None: + dim_out = dim[0] + + self.scale = nn.ModuleList() + for (i, size_ratio) in enumerate(size_ratios): + if size_ratio == 1: + scale = nn.Identity() + else: + scale = [] + while size_ratio > 1: + scale.append(nn.ConvTranspose3d( + dim[i], dim_out if size_ratio == 2 else dim[i], + kernel_size=(1, 3, 3), stride=(1, 2, 2), + padding=(0, 1, 1), output_padding=(0, 1, 1) + )) + size_ratio //= 2 + scale = nn.Sequential(*scale) + self.scale.append(scale) + + self.afno_fusion = afno_fusion + + if 
self.afno_fusion: + if N_sources > 1: + self.fusion = nn.Sequential( + nn.Linear(sum(dim), sum(dim)), + AFNOBlock3d(dim * N_sources, mlp_ratio=2), + nn.Linear(sum(dim), dim_out) + ) + else: + self.fusion = nn.Identity() + + def resize_proj(self, x, i): + x = x.permute(0, 4, 1, 2, 3) + x = self.scale[i](x) + x = x.permute(0, 2, 3, 4, 1) + return x + + def forward(self, x): + x = [self.resize_proj(xx, i) for (i, xx) in enumerate(x)] + if self.afno_fusion: + x = torch.concat(x, axis=-1) + x = self.fusion(x) + else: + x = sum(x) + return x + + +class AFNONowcastNetBase(nn.Module): + def __init__( + self, + autoencoder, + embed_dim=128, + embed_dim_out=None, + analysis_depth=4, + forecast_depth=4, + input_steps=1, + output_steps=2, + train_autoenc=False + ): + super().__init__() + + self.train_autoenc = train_autoenc + self.embed_dim = embed_dim + self.embed_dim_out = embed_dim_out + self.output_steps = output_steps + self.input_steps = input_steps + + # encoding + analysis for each input + ae = autoencoder.requires_grad_(train_autoenc) + self.autoencoder = ae + + self.proj = nn.Conv3d(ae.hidden_width, embed_dim, kernel_size=1) + + self.analysis = nn.Sequential( + *(AFNOBlock3d(embed_dim) for _ in range(analysis_depth)) + ) + + # temporal transformer + self.use_temporal_transformer = input_steps != output_steps + if self.use_temporal_transformer: + self.temporal_transformer = TemporalTransformer(embed_dim) + + # # data fusion + # self.fusion = FusionBlock3d(embed_dim, input_size_ratios, + # afno_fusion=afno_fusion, dim_out=embed_dim_out) + + # forecast + self.forecast = nn.Sequential( + *(AFNOBlock3d(embed_dim_out) for _ in range(forecast_depth)) + ) + + def add_pos_enc(self, x, t): + if t.shape[1] != x.shape[1]: + # this can happen if x has been compressed + # by the autoencoder in the time dimension + ds_factor = t.shape[1] // x.shape[1] + t = F.avg_pool1d(t.unsqueeze(1), ds_factor)[:, 0, :] + + pos_enc = positional_encoding(t, x.shape[-1], add_dims=(2, 3)) + return x + pos_enc + + def forward(self, x): + # (x, t_relative) = list(zip(*x)) + + # encoding + analysis for each input + # def process_input(i): + x = self.autoencoder.encode(x)[0] + x = self.proj(x) + x = x.permute(0, 2, 3, 4, 1) + x = self.analysis(x) + if self.use_temporal_transformer: + # add positional encoding + t = torch.arange(0, self.input_steps, device=x.device) + expand_shape = x.shape[:1] + (-1,) + x.shape[2:] + pos_enc_output = positional_encoding( + t, + self.embed_dim, add_dims=(0, 2, 3) + ) + pe_out = pos_enc_output.expand(*expand_shape) + x = x + pe_out + + # transform to output shape and coordinates + pos_enc_output = positional_encoding( + torch.arange(self.input_steps, self.output_steps + 1, device=x.device), + self.embed_dim, add_dims=(0, 2, 3) + ) + pe_out = pos_enc_output.expand(*expand_shape) + x = self.temporal_transformer(pe_out, x) + + x = self.forecast(x) + return x.permute(0, 4, 1, 2, 3) # to channels-first order + + +class AFNONowcastNet(AFNONowcastNetBase): + def __init__(self, autoencoder, **kwargs): + super().__init__(autoencoder, **kwargs) + + self.output_autoencoder = autoencoder.requires_grad_( + self.train_autoenc) + self.out_proj = nn.Conv3d( + self.embed_dim_out, autoencoder.hidden_width, kernel_size=1 + ) + + def latent_forward(self, x): + x = super().forward(x) + return x + + def forward(self, x): + x = self.latent_forward(x) + x = self.out_proj(x) + return self.output_autoencoder.decode(x) + + +class AFNONowcastNetCascade(nn.Module): + def __init__(self, + nowcast_net, + cascade_depth=4, + 
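# each extra cascade level halves (H, W) by average pooling and doubles
+ # the channel width (see forward below)
+ 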
train_net=False): + super().__init__() + self.cascade_depth = cascade_depth + self.nowcast_net = nowcast_net + for p in self.nowcast_net.parameters(): + p.requires_grad = train_net + self.resnet = nn.ModuleList() + ch = self.nowcast_net.embed_dim_out + self.cascade_dims = [ch] + for i in range(cascade_depth - 1): + ch_out = 2 * ch + self.cascade_dims.append(ch_out) + self.resnet.append( + ResBlock3D(ch, ch_out, kernel_size=(1, 3, 3), norm=None) + ) + ch = ch_out + + def forward(self, x): + x = self.nowcast_net.latent_forward(x) + img_shape = tuple(x.shape[-2:]) + cascade = {img_shape: x} + for i in range(self.cascade_depth - 1): + x = F.avg_pool3d(x, (1, 2, 2)) + x = self.resnet[i](x) + img_shape = tuple(x.shape[-2:]) + cascade[img_shape] = x + return cascade","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/Diffusion/ema.py",".py","2982","76","import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates + else torch.tensor(-1,dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + #remove as '.'-character is not allowed in buffers + s_name = name.replace('.','') + self.m_name2s_name.update({name:s_name}) + self.register_buffer(s_name,p.clone().detach().data) + + self.collected_params = [] + + def forward(self,model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name + + def store(self, parameters): + """""" + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """""" + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """""" + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. 
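+
+ Validation-loop sketch (illustrative; `validate` is a hypothetical
+ stand-in for the caller's evaluation routine):
+
+ >>> ema = LitEma(model)
+ >>> ema.store(model.parameters())    # stash the current weights
+ >>> ema.copy_to(model)               # swap in the EMA weights
+ >>> validate(model)                  # evaluate
+ >>> ema.restore(model.parameters())  # swap the training weights back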
+        """"""
+        for c_param, param in zip(self.collected_params, parameters):
+            param.data.copy_(c_param.data)","Python"
"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/Diffusion/utils.py",".py","8866","245","# adapted from
+# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
+# and
+# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+# and
+# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
+#
+# thanks!
+
+import math
+import torch
+import torch.nn as nn
+import numpy as np
+from einops import repeat
+
+
+def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+    if schedule == ""linear"":
+        betas = (
+                torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2
+        )
+
+    elif schedule == ""cosine"":
+        timesteps = (
+                torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s
+        )
+        alphas = timesteps / (1 + cosine_s) * np.pi / 2
+        alphas = torch.cos(alphas).pow(2)
+        alphas = alphas / alphas[0]
+        betas = 1 - alphas[1:] / alphas[:-1]
+        betas = np.clip(betas, a_min=0, a_max=0.999)
+
+    elif schedule == ""sqrt_linear"":
+        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)
+    elif schedule == ""sqrt"":
+        betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5
+    else:
+        raise ValueError(f""schedule '{schedule}' unknown."")
+    return betas.numpy()
+
+
+def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
+    if ddim_discr_method == 'uniform':
+        c = num_ddpm_timesteps // num_ddim_timesteps
+        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
+    elif ddim_discr_method == 'quad':
+        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
+    else:
+        raise NotImplementedError(f'There is no ddim discretization method called ""{ddim_discr_method}""')
+
+    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
+    # add one to get the final alpha values right (the ones from first scale to data during sampling)
+    steps_out = ddim_timesteps + 1
+    if verbose:
+        print(f'Selected timesteps for ddim sampler: {steps_out}')
+    return steps_out
+
+
+def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
+    # select alphas for computing the variance schedule
+    alphas = alphacums[ddim_timesteps]
+    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
+
+    # according to the formula provided in https://arxiv.org/abs/2010.02502
+    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
+    if verbose:
+        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        print(f'For the chosen value of eta, which is {eta}, '
+              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+    return sigmas, alphas, alphas_prev
+
+
+def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):
+    """"""
+    Create a beta schedule that discretizes the given alpha_t_bar function,
+    which defines the cumulative product of (1-beta) over time from t = [0,1].
+    :param num_diffusion_timesteps: the number of betas to produce.
+ :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. + """""" + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """""" + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """""" + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """""" + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """""" + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """""" + Zero out the parameters of a module and return it. + """""" + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """""" + Scale the parameters of a module and return it. 
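+
+    Example (illustrative, not original)::
+
+        layer = scale_module(nn.Linear(8, 8), 0.5)  # multiplies all params in place by 0.5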
+ """""" + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """""" + Take the mean over all non-batch dimensions. + """""" + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + + +def normalization(channels): + """""" + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. + """""" + return nn.Identity() #GroupNorm32(32, channels) + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() + + +def conv_nd(dims, *args, **kwargs): + """""" + Create a 1D, 2D, or 3D convolution module. + """""" + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f""unsupported dimensions: {dims}"") + + +def linear(*args, **kwargs): + """""" + Create a linear module. + """""" + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """""" + Create a 1D, 2D, or 3D average pooling module. + """""" + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f""unsupported dimensions: {dims}"")","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/Diffusion/DiffusionModel.py",".py","8616","229",""""""" +From https://github.com/CompVis/latent-diffusion/main/ldm/models/diffusion/ddpm.py + +The original file acknowledges: +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +"""""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from contextlib import contextmanager +from functools import partial + + +from SHADECast.Models.Diffusion.utils import make_beta_schedule, extract_into_tensor, noise_like, timestep_embedding +from SHADECast.Models.Diffusion.ema import LitEma + + +class LatentDiffusion(pl.LightningModule): + def __init__(self, + model, + autoencoder, + context_encoder=None, + timesteps=1000, + beta_schedule=""linear"", + loss_type=""l2"", + use_ema=True, + lr=1e-4, + lr_warmup=0, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + parameterization=""eps"", # all assuming fixed variance schedules + opt_patience=5, + get_t=False, + **kwargs + ): + super().__init__() + self.model = model + self.autoencoder = autoencoder.requires_grad_(False) + self.conditional = (context_encoder is not None) + self.context_encoder = context_encoder + self.lr = lr + self.lr_warmup = lr_warmup + self.opt_patience = opt_patience + self.get_t = get_t + assert parameterization in [""eps"", ""x0""], 'currently only supporting ""eps"" and ""x0""' + self.parameterization = parameterization + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + + self.register_schedule( + beta_schedule=beta_schedule, timesteps=timesteps, + 
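+            # (added note) the schedule registered below is consumed by
+            # q_sample(), which implements the closed form
+            #   x_t = sqrt(alphabar_t) * x_0 + sqrt(1 - alphabar_t) * eps,
+            # eps ~ N(0, I); only sqrt_alphas_cumprod and
+            # sqrt_one_minus_alphas_cumprod are needed at train time.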
linear_start=linear_start, linear_end=linear_end,
+            cosine_s=cosine_s
+        )
+
+        self.loss_type = loss_type
+
+    def register_schedule(self, beta_schedule=""linear"", timesteps=1000,
+                          linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):
+
+        betas = make_beta_schedule(
+            beta_schedule, timesteps,
+            linear_start=linear_start, linear_end=linear_end,
+            cosine_s=cosine_s
+        )
+        alphas = 1. - betas
+        alphas_cumprod = np.cumprod(alphas, axis=0)
+        alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
+
+        timesteps, = betas.shape
+        self.num_timesteps = int(timesteps)
+        self.linear_start = linear_start
+        self.linear_end = linear_end
+        assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep'
+
+        to_torch = partial(torch.tensor, dtype=torch.float32)
+
+        self.register_buffer('betas', to_torch(betas))
+        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+        self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev))
+
+        # calculations for diffusion q(x_t | x_{t-1}) and others
+        self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod)))
+        self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod)))
+
+    @contextmanager
+    def ema_scope(self, context=None):
+        if self.use_ema:
+            self.model_ema.store(self.model.parameters())
+            self.model_ema.copy_to(self.model)
+            if context is not None:
+                print(f""{context}: Switched to EMA weights"")
+        try:
+            yield None
+        finally:
+            if self.use_ema:
+                self.model_ema.restore(self.model.parameters())
+                if context is not None:
+                    print(f""{context}: Restored training weights"")
+
+    def apply_model(self, x_noisy, t, cond=None, return_ids=False):
+        # if self.conditional:
+        #     cond = self.context_encoder(cond)
+        with self.ema_scope():
+            return self.model(x_noisy, t, context=cond)
+
+    def q_sample(self, x_start, t, noise=None):
+        if noise is None:
+            noise = torch.randn_like(x_start)
+        return (
+                extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start +
+                extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise
+        )
+
+    def get_loss(self, pred, target):
+        if self.loss_type == 'l1':
+            loss = (target - pred).abs()
+        elif self.loss_type == 'l2':
+            loss = torch.nn.functional.mse_loss(target, pred, reduction='none')
+        else:
+            raise NotImplementedError(f""unknown loss type '{self.loss_type}'"")
+        return loss.mean()
+
+    def p_losses(self, x_start, t, noise=None, context=None):
+        if noise is None:
+            noise = torch.randn_like(x_start)
+        x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)
+        model_out = self.model(x_noisy, t, context=context)
+        if self.parameterization == ""eps"":
+            target = noise
+            yhat = x_noisy - model_out
+        elif self.parameterization == ""x0"":
+            target = x_start
+            yhat = model_out
+        else:
+            raise NotImplementedError(f""Parameterization {self.parameterization} not yet supported"")
+        return self.get_loss(model_out, target)
+
+    def forward(self, x, t=None, *args, **kwargs):
+        if t is None:
+            t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()
+        return self.p_losses(x, t, *args, **kwargs)
+
+    def shared_step(self, batch, t=None):
+        if len(batch) == 2:
+            (x, y) = batch
+            context = self.context_encoder(x) if self.conditional else None
+        elif len(batch) == 3:
+            (x, y, c) = batch
+            context = self.context_encoder(x, c) if self.conditional else None
+        loss = self(self.autoencoder.encode(y)[0], t=t, context=context)
+        return loss
+
+
+    def training_step(self, batch, batch_idx):
+        log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True}
+        loss = self.shared_step(batch)
+        self.log(""train_loss"", loss, **log_params)
+        return loss
+
+    @torch.no_grad()
+    def validation_step(self, batch, batch_idx):
+        if self.get_t:
+            t = torch.tensor(batch[-1], dtype=torch.long)
+            batch = batch[:-1]
+        else:
+            t = None
+        log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True}
+        loss = self.shared_step(batch, t)
+
+        with self.ema_scope():
+            loss_ema = self.shared_step(batch, t)
+        self.log(""val_loss"", loss, **log_params)
+        self.log(""val_loss_ema"", loss_ema, **log_params)
+
+    def on_train_batch_end(self, *args, **kwargs):
+        if self.use_ema:
+            self.model_ema(self.model)
+
+    def configure_optimizers(self):
+        optimizer = torch.optim.AdamW(self.parameters(),
+                                      lr=self.lr,
+                                      betas=(0.5, 0.9),
+                                      weight_decay=1e-3)
+        reduce_lr = torch.optim.lr_scheduler.ReduceLROnPlateau(
+            optimizer, patience=self.opt_patience, factor=0.25, verbose=True
+        )
+        return {
+            ""optimizer"": optimizer,
+            ""lr_scheduler"": {
+                ""scheduler"": reduce_lr,
+                ""monitor"": ""val_loss_ema"",
+                ""frequency"": 1,
+            },
+        }
+
+    # def on_before_optimizer_step(self, optimizer):
+    #     # Compute the 2-norm for each layer
+    #     # If using mixed precision, the gradients are already unscaled here
+    #     norms = grad_norm(self.layer, norm_type=2)
+    #     self.log_dict(norms)
+
+    def optimizer_step(
+            self,
+            epoch,
+            batch_idx,
+            optimizer,
+            optimizer_idx,
+            optimizer_closure,
+            **kwargs
+    ):
+        if self.trainer.global_step < self.lr_warmup:
+            lr_scale = (self.trainer.global_step + 1) / self.lr_warmup
+            for pg in optimizer.param_groups:
+                pg['lr'] = lr_scale * self.lr
+
+        super().optimizer_step(
+            epoch, batch_idx, optimizer,
+            optimizer_idx, optimizer_closure,
+            **kwargs
+        )
+
+
+","Python"
"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/Sampler/PLMS.py",".py","12852","251","import torch
+import numpy as np
+from tqdm import tqdm
+from Models.Sampler.utils import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
+
+
+""""""
+From: https://github.com/CompVis/latent-diffusion/blob/main/ldm/models/diffusion/plms.py
+""""""
+
+
+""""""SAMPLING ONLY.""""""
+
+import torch
+import numpy as np
+from tqdm import tqdm
+
+from Models.Diffusion.utils import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like
+
+
+class PLMSSampler:
+    def __init__(self, model, schedule=""linear"", **kwargs):
+        self.model = model
+        self.ddpm_num_timesteps = model.num_timesteps
+        self.schedule = schedule
+
+    def register_buffer(self, name, attr):
+        #if type(attr) == torch.Tensor:
+        #    if attr.device != torch.device(""cuda""):
+        #        attr = attr.to(torch.device(""cuda""))
+        setattr(self, name, attr)
+
+    def make_schedule(self, ddim_num_steps, ddim_discretize=""uniform"", ddim_eta=0., verbose=True):
+        if ddim_eta != 0:
+            raise ValueError('ddim_eta must be 0 for PLMS')
+        self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,
+                                                  num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)
+        alphas_cumprod = self.model.alphas_cumprod
+        assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'
+        to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)
+
+        self.register_buffer('betas', to_torch(self.model.betas))
+        self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))
+        self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))
+
+        # 
calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + progbar=True, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
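+               # (added commentary) hedged usage sketch, mirroring the call in
+               # Test_SHADECast.py further down this dataset:
+               #   sampler = PLMSSampler(ldm)
+               #   z, inter = sampler.sample(S=ddim_steps, batch_size=n_ens,
+               #                             shape=tuple(enc_y.shape[1:]),
+               #                             conditioning=cond, eta=0.)
+               # eta must stay 0: make_schedule() raises for eta != 0 with PLMS.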
+ **kwargs + ): + """""" + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f""Warning: Got {cbs} conditionings but batch-size is {batch_size}"") + else: + if conditioning.shape[0] != batch_size: + print(f""Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}"") + """""" + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + size = (batch_size,) + shape + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + progbar=progbar + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, progbar=True): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f""Running PLMS Sampling with {total_steps} timesteps"") + + iterator = time_range + if progbar: + iterator = tqdm(iterator, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
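+                # (added note) where mask == 1 the sample is overwritten with a
+                # freshly noised version of the known x0 at this timestep, and
+                # the (1 - mask) part below keeps the model's own sample, i.e.
+                # diffusion-inpainting-style conditioning on known pixels.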
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t ,c): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == ""eps"" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(x, e_t, index): + # select parameters corresponding to the currently considered timestep + param_shape = (b,) + (1,)*(x.ndim-1) + a_t = torch.full(param_shape, alphas[index], device=device) + a_prev = torch.full(param_shape, alphas_prev[index], device=device) + sigma_t = torch.full(param_shape, sigmas[index], device=device) + sqrt_one_minus_at = torch.full(param_shape, sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
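+            # (added note) deterministic DDIM update reused by PLMS:
+            #   x_prev = sqrt(a_prev) * pred_x0
+            #          + sqrt(1 - a_prev - sigma_t^2) * e_t + sigma_t * z,
+            # where e_t is replaced by the multistep estimate e_t_prime below.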
- a_prev - sigma_t**2).sqrt() * e_t
+            noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature
+            if noise_dropout > 0.:
+                noise = torch.nn.functional.dropout(noise, p=noise_dropout)
+            x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise
+            return x_prev, pred_x0
+
+        e_t = get_model_output(x, t, c)
+        if len(old_eps) == 0:
+            # Pseudo Improved Euler (2nd order)
+            x_prev, pred_x0 = get_x_prev_and_pred_x0(x, e_t, index)
+            e_t_next = get_model_output(x_prev, t_next, c)
+            e_t_prime = (e_t + e_t_next) / 2
+        elif len(old_eps) == 1:
+            # 2nd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (3 * e_t - old_eps[-1]) / 2
+        elif len(old_eps) == 2:
+            # 3rd order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12
+        elif len(old_eps) >= 3:
+            # 4th order Pseudo Linear Multistep (Adams-Bashforth)
+            e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24
+
+        x_prev, pred_x0 = get_x_prev_and_pred_x0(x, e_t_prime, index)
+
+        return x_prev, pred_x0, e_t","Python"
"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/Sampler/utils.py",".py","2282","48","# adapted from
+# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py
+# and
+# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py
+# and
+# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py
+#
+# thanks!
+
+
+import torch
+import numpy as np
+
+def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True):
+    if ddim_discr_method == 'uniform':
+        c = num_ddpm_timesteps // num_ddim_timesteps
+        ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c)))
+    elif ddim_discr_method == 'quad':
+        ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int)
+    else:
+        raise NotImplementedError(f'There is no ddim discretization method called ""{ddim_discr_method}""')
+
+    # assert ddim_timesteps.shape[0] == num_ddim_timesteps
+    # add one to get the final alpha values right (the ones from first scale to data during sampling)
+    steps_out = ddim_timesteps + 1
+    if verbose:
+        print(f'Selected timesteps for ddim sampler: {steps_out}')
+    return steps_out
+
+
+def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True):
+    # select alphas for computing the variance schedule
+    alphas = alphacums[ddim_timesteps]
+    alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist())
+
+    # according to the formula provided in https://arxiv.org/abs/2010.02502
+    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
+    if verbose:
+        print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}')
+        print(f'For the chosen value of eta, which is {eta}, '
+              f'this results in the following sigma_t schedule for ddim sampler {sigmas}')
+    return sigmas, alphas, alphas_prev
+
+
+def noise_like(shape, device, repeat=False):
+    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
+    noise = lambda: torch.randn(shape, device=device)
+    return repeat_noise() if repeat else noise()","Python"
"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Models/VAE/VariationalAutoEncoder.py",".py","5113","141","import torch
+import torch.nn as
nn +import pytorch_lightning as pl +import numpy as np +from SHADECast.Blocks.ResBlock3D import ResBlock3D +from utils import sample_from_standard_normal, kl_from_standard_normal + + +class Encoder(nn.Sequential): + def __init__(self, in_dim=1, levels=2, min_ch=64, max_ch=64): + sequence = [] + channels = np.hstack((in_dim, np.arange(1, (levels + 1)) * min_ch)) + channels[channels > max_ch] = max_ch + for i in range(levels): + in_channels = int(channels[i]) + out_channels = int(channels[i + 1]) + res_kernel_size = (3, 3, 3) if i == 0 else (1, 3, 3) + res_block = ResBlock3D( + in_channels, out_channels, + kernel_size=res_kernel_size, + norm_kwargs={""num_groups"": 1} + ) + sequence.append(res_block) + downsample = nn.Conv3d(out_channels, out_channels, + kernel_size=(2, 2, 2), stride=(2, 2, 2)) + sequence.append(downsample) + + super().__init__(*sequence) + + +class Decoder(nn.Sequential): + def __init__(self, in_dim=1, levels=2, min_ch=64, max_ch=64): + sequence = [] + channels = np.hstack((in_dim, np.arange(1, (levels + 1)) * min_ch)) + channels[channels > max_ch] = max_ch + for i in reversed(list(range(levels))): + in_channels = int(channels[i + 1]) + out_channels = int(channels[i]) + upsample = nn.ConvTranspose3d(in_channels, in_channels, + kernel_size=(2, 2, 2), stride=(2, 2, 2)) + sequence.append(upsample) + res_kernel_size = (3, 3, 3) if (i == 0) else (1, 3, 3) + res_block = ResBlock3D( + in_channels, out_channels, + kernel_size=res_kernel_size, + norm_kwargs={""num_groups"": 1} + ) + sequence.append(res_block) + + super().__init__(*sequence) + + +class VAE(pl.LightningModule): + def __init__(self, + encoder, + decoder, + kl_weight, + encoded_channels, + hidden_width, + opt_patience, + **kwargs): + super().__init__(**kwargs) + self.save_hyperparameters(ignore=['encoder', 'decoder']) + self.encoder = encoder + self.decoder = decoder + self.hidden_width = hidden_width + self.opt_patience = opt_patience + self.to_moments = nn.Conv3d(encoded_channels, 2 * hidden_width, + kernel_size=1) + self.to_decoder = nn.Conv3d(hidden_width, encoded_channels, + kernel_size=1) + + self.log_var = nn.Parameter(torch.zeros(size=())) + self.kl_weight = kl_weight + + def encode(self, x): + h = self.encoder(x) + (mean, log_var) = torch.chunk(self.to_moments(h), 2, dim=1) + return mean, log_var + + def decode(self, z): + z = self.to_decoder(z) + dec = self.decoder(z) + return dec + + def forward(self, x, sample_posterior=True): + (mean, log_var) = self.encode(x) + if sample_posterior: + z = sample_from_standard_normal(mean, log_var) + else: + z = mean + dec = self.decode(z) + return dec, mean, log_var + + def _loss(self, batch): + x = batch + + (y_pred, mean, log_var) = self.forward(x) + + rec_loss = (x - y_pred).abs().mean() + kl_loss = kl_from_standard_normal(mean, log_var) + + total_loss = (1 - self.kl_weight) * rec_loss + self.kl_weight * kl_loss + + return total_loss, rec_loss, kl_loss + + def training_step(self, batch, batch_idx): + loss = self._loss(batch)[0] + log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True} + self.log('train_loss', loss, **log_params) + return loss + + @torch.no_grad() + def val_test_step(self, batch, batch_idx, split=""val""): + (total_loss, rec_loss, kl_loss) = self._loss(batch) + log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True} + self.log(f""{split}_loss"", total_loss, **log_params) + self.log(f""{split}_rec_loss"", rec_loss.mean(), **log_params) + self.log(f""{split}_kl_loss"", kl_loss, 
**log_params) + + def validation_step(self, batch, batch_idx): + self.val_test_step(batch, batch_idx, split=""val"") + + def test_step(self, batch, batch_idx): + self.val_test_step(batch, batch_idx, split=""test"") + + def configure_optimizers(self): + optimizer = torch.optim.AdamW(self.parameters(), lr=1e-3, + betas=(0.5, 0.9), weight_decay=1e-3) + reduce_lr = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, patience=self.opt_patience, factor=0.25, verbose=True + ) + return { + ""optimizer"": optimizer, + ""lr_scheduler"": { + ""scheduler"": reduce_lr, + ""monitor"": ""val_rec_loss"", + ""frequency"": 1, + }, + } +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Blocks/ResBlock3D.py",".py","5921","141",""""""" +From https://github.com/MeteoSwiss/ldcast/blob/master/ldcast/models/blocks/resnet.py +"""""" + +import torch +import torch.nn as nn +from torch.nn.utils.parametrizations import spectral_norm as sn +from utils import activation, normalization + + +class ResBlock3D(nn.Module): + def __init__( + self, in_channels, out_channels, resample=None, + resample_factor=(1, 1, 1), kernel_size=(3, 3, 3), + act='swish', norm='group', norm_kwargs=None, + spectral_norm=False, + **kwargs + ): + super().__init__(**kwargs) + if in_channels != out_channels: + self.proj = nn.Conv3d(in_channels, out_channels, kernel_size=1) + else: + self.proj = nn.Identity() + + padding = tuple(k // 2 for k in kernel_size) + if resample == ""down"": + self.resample = nn.AvgPool3d(resample_factor, ceil_mode=True) + self.conv1 = nn.Conv3d(in_channels, out_channels, + kernel_size=kernel_size, stride=resample_factor, + padding=padding) + self.conv2 = nn.Conv3d(out_channels, out_channels, + kernel_size=kernel_size, padding=padding) + elif resample == ""up"": + self.resample = nn.Upsample( + scale_factor=resample_factor, mode='trilinear') + self.conv1 = nn.ConvTranspose3d(in_channels, out_channels, + kernel_size=kernel_size, padding=padding) + output_padding = tuple( + 2 * p + s - k for (p, s, k) in zip(padding, resample_factor, kernel_size) + ) + self.conv2 = nn.ConvTranspose3d(out_channels, out_channels, + kernel_size=kernel_size, stride=resample_factor, + padding=padding, output_padding=output_padding) + else: + self.resample = nn.Identity() + self.conv1 = nn.Conv3d(in_channels, out_channels, + kernel_size=kernel_size, padding=padding) + self.conv2 = nn.Conv3d(out_channels, out_channels, + kernel_size=kernel_size, padding=padding) + + if isinstance(act, str): + act = (act, act) + self.act1 = activation(act_type=act[0]) + self.act2 = activation(act_type=act[1]) + + if norm_kwargs is None: + norm_kwargs = {} + self.norm1 = normalization(in_channels, norm_type=norm, **norm_kwargs) + self.norm2 = normalization(out_channels, norm_type=norm, **norm_kwargs) + if spectral_norm: + self.conv1 = sn(self.conv1) + self.conv2 = sn(self.conv2) + if not isinstance(self.proj, nn.Identity): + self.proj = sn(self.proj) + + def forward(self, x): + x_in = self.resample(self.proj(x)) + x = self.norm1(x) + x = self.act1(x) + x = self.conv1(x) + x = self.norm2(x) + x = self.act2(x) + x = self.conv2(x) + return x + x_in + + +class ResBlock2D(nn.Module): + def __init__( + self, in_channels, out_channels, resample=None, + resample_factor=(1, 1), kernel_size=(3, 3), + act='swish', norm='group', norm_kwargs=None, + spectral_norm=False, + **kwargs + ): + super().__init__(**kwargs) + if in_channels != out_channels: + self.proj = nn.Conv2d(in_channels, out_channels, kernel_size=1) + else: + self.proj = nn.Identity() + + 
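+        # (added note) padding = k // 2 is ""same"" padding for odd kernel
+        # sizes, e.g. kernel_size=(3, 3) -> padding=(1, 1), keeping the
+        # residual branch and the projected shortcut shape-compatible.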
padding = tuple(k // 2 for k in kernel_size)
+        if resample == ""down"":
+            self.resample = nn.AvgPool2d(resample_factor, ceil_mode=True)
+            self.conv1 = nn.Conv2d(in_channels, out_channels,
+                                   kernel_size=kernel_size, stride=resample_factor,
+                                   padding=padding)
+            self.conv2 = nn.Conv2d(out_channels, out_channels,
+                                   kernel_size=kernel_size, padding=padding)
+        elif resample == ""up"":
+            self.resample = nn.Upsample(
+                scale_factor=resample_factor, mode='bilinear')
+            self.conv1 = nn.ConvTranspose2d(in_channels, out_channels,
+                                            kernel_size=kernel_size, padding=padding)
+            output_padding = tuple(
+                2 * p + s - k for (p, s, k) in zip(padding, resample_factor, kernel_size)
+            )
+            self.conv2 = nn.ConvTranspose2d(out_channels, out_channels,
+                                            kernel_size=kernel_size, stride=resample_factor,
+                                            padding=padding, output_padding=output_padding)
+        else:
+            self.resample = nn.Identity()
+            self.conv1 = nn.Conv2d(in_channels, out_channels,
+                                   kernel_size=kernel_size, padding=padding)
+            self.conv2 = nn.Conv2d(out_channels, out_channels,
+                                   kernel_size=kernel_size, padding=padding)
+
+        if isinstance(act, str):
+            act = (act, act)
+        self.act1 = activation(act_type=act[0])
+        self.act2 = activation(act_type=act[1])
+
+        if norm_kwargs is None:
+            norm_kwargs = {}
+        self.norm1 = normalization(in_channels, norm_type=norm, **norm_kwargs)
+        self.norm2 = normalization(out_channels, norm_type=norm, **norm_kwargs)
+        if spectral_norm:
+            self.conv1 = sn(self.conv1)
+            self.conv2 = sn(self.conv2)
+            if not isinstance(self.proj, nn.Identity):
+                self.proj = sn(self.proj)
+
+    def forward(self, x):
+        x_in = self.resample(self.proj(x))
+        x = self.norm1(x)
+        x = self.act1(x)
+        x = self.conv1(x)
+        x = self.norm2(x)
+        x = self.act2(x)
+        x = self.conv2(x)
+        return x + x_in
+","Python"
"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Blocks/TimeStep.py",".py","951","32","from abc import abstractmethod
+import torch.nn as nn
+from SHADECast.Blocks.AFNO import AFNOCrossAttentionBlock3d
+
+class TimestepBlock(nn.Module):
+    """"""
+    Any module where forward() takes timestep embeddings as a second argument.
+    """"""
+
+    @abstractmethod
+    def forward(self, x, emb):
+        """"""
+        Apply the module to `x` given `emb` timestep embeddings.
+        """"""
+
+
+class TimestepEmbedSequential(nn.Sequential, TimestepBlock):
+    """"""
+    A sequential module that passes timestep embeddings to the children that
+    support it as an extra input.
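+
+    Illustrative sketch (added; SomeTimestepBlock is hypothetical)::
+
+        block = TimestepEmbedSequential(
+            ResBlock3D(64, 64),                  # plain layer: called as layer(x)
+            SomeTimestepBlock(64),               # TimestepBlock: called as layer(x, emb)
+            AFNOCrossAttentionBlock3d(64, 128),  # called with context[tuple(x.shape[-2:])]
+        )
+        out = block(x, emb, context=cascade_dict)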
+ """""" + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, AFNOCrossAttentionBlock3d): + img_shape = tuple(x.shape[-2:]) + x = layer(x, context[img_shape]) + else: + x = layer(x) + return x","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Blocks/AFNO.py",".py","8604","231",""""""" +From https://github.com/MeteoSwiss/ldcast/blob/master/ldcast/models/blocks/afno.py +"""""" + +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class AFNO3D(nn.Module): + def __init__( + self, hidden_size, num_blocks=8, sparsity_threshold=0.01, + hard_thresholding_fraction=1, hidden_size_factor=1, res_mult=1 + ): + super().__init__() + assert hidden_size % num_blocks == 0, f""hidden_size {hidden_size} should be divisble by num_blocks {num_blocks}"" + + self.hidden_size = hidden_size + self.sparsity_threshold = sparsity_threshold + self.num_blocks = num_blocks + self.block_size = self.hidden_size // self.num_blocks + self.hard_thresholding_fraction = hard_thresholding_fraction + self.hidden_size_factor = hidden_size_factor + self.scale = 0.02 + self.res_mult = res_mult + + self.w1 = nn.Parameter( + self.scale * torch.randn(2, self.num_blocks, self.block_size, self.block_size * self.hidden_size_factor)) + self.b1 = nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size * self.hidden_size_factor)) + self.w2 = nn.Parameter( + self.scale * torch.randn(2, self.num_blocks, self.block_size * self.hidden_size_factor, self.block_size)) + self.b2 = nn.Parameter(self.scale * torch.randn(2, self.num_blocks, self.block_size)) + + def forward(self, x): + bias = x + + dtype = x.dtype + x = x.float() + B, D, H, W, C = x.shape + + x = torch.fft.rfftn(x, dim=(1, 2, 3), norm=""ortho"") + x = x.reshape(B, D, H, W // 2 + 1, self.num_blocks, self.block_size) + + o1_real = torch.zeros([B, D, H, W // 2 + 1, self.num_blocks, self.block_size * self.hidden_size_factor], + device=x.device) + o1_imag = torch.zeros([B, D, H, W // 2 + 1, self.num_blocks, self.block_size * self.hidden_size_factor], + device=x.device) + o2_real = torch.zeros(x.shape, device=x.device) + o2_imag = torch.zeros(x.shape, device=x.device) + + total_modes = H // 2 + 1 + kept_modes = int(total_modes * self.hard_thresholding_fraction) + + o1_real[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes] = F.relu( + torch.einsum('...bi,bio->...bo', + x[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes].real, self.w1[0]) - + torch.einsum('...bi,bio->...bo', + x[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes].imag, self.w1[1]) + + self.b1[0] + ) + + o1_imag[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes] = F.relu( + torch.einsum('...bi,bio->...bo', + x[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes].imag, self.w1[0]) + + torch.einsum('...bi,bio->...bo', + x[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes].real, self.w1[1]) + + self.b1[1] + ) + + o2_real[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes] = ( + torch.einsum('...bi,bio->...bo', + o1_real[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes], + self.w2[0]) - + torch.einsum('...bi,bio->...bo', + o1_imag[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes], + self.w2[1]) + + self.b2[0] + ) + + o2_imag[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes] = ( + 
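+            # (added note) together with o2_real above, this is the blockwise
+            # complex product (a + ib)(c + id) = (ac - bd) + i(ad + bc):
+            # w2[0] / w2[1] hold the real / imaginary parts of the second
+            # block-diagonal layer, applied only to the kept low modes.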
torch.einsum('...bi,bio->...bo', + o1_imag[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes], + self.w2[0]) + + torch.einsum('...bi,bio->...bo', + o1_real[:, :, total_modes - kept_modes:total_modes + kept_modes, :kept_modes], + self.w2[1]) + + self.b2[1] + ) + + x = torch.stack([o2_real, o2_imag], dim=-1) + x = F.softshrink(x, lambd=self.sparsity_threshold) + x = torch.view_as_complex(x) + x = x.reshape(B, D, H, W // 2 + 1, C) + x = torch.fft.irfftn(x, s=(D, H*self.res_mult, W*self.res_mult), dim=(1, 2, 3), norm=""ortho"") + x = x.type(dtype) + if self.res_mult>1: + return x + else: + return x + bias + + +class Mlp(nn.Module): + def __init__( + self, + in_features, hidden_features=None, out_features=None, + act_layer=nn.GELU, drop=0.0 + ): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) if drop > 0 else nn.Identity() + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class AFNOBlock3d(nn.Module): + def __init__( + self, + dim, + mlp_ratio=4., + drop=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm, + double_skip=True, + num_blocks=8, + sparsity_threshold=0.01, + hard_thresholding_fraction=1.0, + data_format=""channels_last"", + mlp_out_features=None, + afno_res_mult=1, + + ): + super().__init__() + self.norm_layer = norm_layer + self.afno_res_mult = afno_res_mult + self.norm1 = norm_layer(dim) + self.filter = AFNO3D(dim, num_blocks, sparsity_threshold, + hard_thresholding_fraction, res_mult=afno_res_mult) + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp( + in_features=dim, out_features=mlp_out_features, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, drop=drop + ) + self.double_skip = double_skip + self.channels_first = (data_format == ""channels_first"") + + def forward(self, x): + if self.channels_first: + # AFNO natively uses a channels-last data format + x = x.permute(0, 2, 3, 4, 1) + + residual = x + x = self.norm1(x) + x = self.filter(x) + if self.afno_res_mult > 1: + residual = F.interpolate(residual, x.shape[2:]) + if self.double_skip: + x = x + residual + residual = x + + x = self.norm2(x) + x = self.mlp(x) + x = x + residual + + if self.channels_first: + x = x.permute(0, 4, 1, 2, 3) + + return x + + +class AFNOCrossAttentionBlock3d(nn.Module): + """""" AFNO 3D Block with channel mixing from two sources. 
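+
+    Channels-last sketch (added description): x of shape (B, D, H, W, dim) and
+    context y of shape (B, D, H, W, context_dim) are concatenated on the
+    channel axis, mixed by AFNO3D over dim + context_dim channels, and an MLP
+    projects back to dim, with a residual connection around each stage.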
+ """""" + + def __init__( + self, + dim, + context_dim, + mlp_ratio=2., + drop=0., + act_layer=nn.GELU, + norm_layer=nn.Identity, + double_skip=True, + num_blocks=8, + sparsity_threshold=0.01, + hard_thresholding_fraction=1.0, + data_format=""channels_last"", + timesteps=None + ): + super().__init__() + + self.norm1 = norm_layer(dim) + self.norm2 = norm_layer(dim + context_dim) + mlp_hidden_dim = int((dim + context_dim) * mlp_ratio) + self.pre_proj = nn.Linear(dim + context_dim, dim + context_dim) + self.filter = AFNO3D(dim + context_dim, num_blocks, sparsity_threshold, + hard_thresholding_fraction) + self.mlp = Mlp( + in_features=dim + context_dim, + out_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, drop=drop + ) + self.channels_first = (data_format == ""channels_first"") + + def forward(self, x, y): + if self.channels_first: + # AFNO natively uses a channels-last order + x = x.permute(0, 2, 3, 4, 1) + y = y.permute(0, 2, 3, 4, 1) + + xy = torch.concat((self.norm1(x), y), axis=-1) + xy = self.pre_proj(xy) + xy + xy = self.filter(self.norm2(xy)) + xy # AFNO filter + x = self.mlp(xy) + x # feed-forward + + if self.channels_first: + x = x.permute(0, 4, 1, 2, 3) + + return x","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","SHADECast/Blocks/attention.py",".py","3277","108",""""""" +From https://github.com/MeteoSwiss/ldcast/blob/master/ldcast/models/blocks/attention.py +"""""" + +import math +import torch +from torch import nn +import torch.nn.functional as F + + +class TemporalAttention(nn.Module): + def __init__( + self, channels, context_channels=None, + head_dim=32, num_heads=8 + ): + super().__init__() + self.channels = channels + if context_channels is None: + context_channels = channels + self.context_channels = context_channels + self.head_dim = head_dim + self.num_heads = num_heads + self.inner_dim = head_dim * num_heads + self.attn_scale = self.head_dim ** -0.5 + if channels % num_heads: + raise ValueError(""channels must be divisible by num_heads"") + self.KV = nn.Linear(context_channels, self.inner_dim * 2) + self.Q = nn.Linear(channels, self.inner_dim) + self.proj = nn.Linear(self.inner_dim, channels) + + def forward(self, x, y=None): + if y is None: + y = x + + (K, V) = self.KV(y).chunk(2, dim=-1) + (B, Dk, H, W, C) = K.shape + shape = (B, Dk, H, W, self.num_heads, self.head_dim) + K = K.reshape(shape) + V = V.reshape(shape) + + Q = self.Q(x) + (B, Dq, H, W, C) = Q.shape + shape = (B, Dq, H, W, self.num_heads, self.head_dim) + Q = Q.reshape(shape) + + K = K.permute((0, 2, 3, 4, 5, 1)) # K^T + V = V.permute((0, 2, 3, 4, 1, 5)) + Q = Q.permute((0, 2, 3, 4, 1, 5)) + + attn = torch.matmul(Q, K) * self.attn_scale + attn = F.softmax(attn, dim=-1) + y = torch.matmul(attn, V) + y = y.permute((0, 4, 1, 2, 3, 5)) + y = y.reshape((B, Dq, H, W, C)) + y = self.proj(y) + return y + + +class TemporalTransformer(nn.Module): + def __init__(self, + channels, + mlp_dim_mul=1, + **kwargs + ): + super().__init__() + self.attn1 = TemporalAttention(channels, **kwargs) + self.attn2 = TemporalAttention(channels, **kwargs) + self.norm1 = nn.LayerNorm(channels) + self.norm2 = nn.LayerNorm(channels) + self.norm3 = nn.LayerNorm(channels) + self.mlp = MLP(channels, dim_mul=mlp_dim_mul) + + def forward(self, x, y): + x = self.attn1(self.norm1(x)) + x # self attention + x = self.attn2(self.norm2(x), y) + x # cross attention + return self.mlp(self.norm3(x)) + x # feed-forward + + +class MLP(nn.Sequential): + def __init__(self, dim, dim_mul=4): + inner_dim = dim * dim_mul + 
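+        # (added note) standard transformer feed-forward: Linear -> SiLU ->
+        # Linear with expansion factor dim_mul; TemporalTransformer above
+        # defaults to mlp_dim_mul=1, i.e. no hidden expansion.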
sequence = [ + nn.Linear(dim, inner_dim), + nn.SiLU(), + nn.Linear(inner_dim, dim) + ] + super().__init__(*sequence) + + +def positional_encoding(position, dims, add_dims=()): + div_term = torch.exp( + torch.arange(0, dims, 2, device=position.device) * + (-math.log(10000.0) / dims) + ) + if position.ndim == 1: + arg = position[:, None] * div_term[None, :] + else: + arg = position[:, :, None] * div_term[None, None, :] + + pos_enc = torch.concat( + [torch.sin(arg), torch.cos(arg)], + dim=-1 + ) + if add_dims: + for dim in add_dims: + pos_enc = pos_enc.unsqueeze(dim) + return pos_enc +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","Benchmark/IrradianceNet.py",".py","11607","292","import torch.nn as nn +from collections import OrderedDict +import torch +import logging +import pytorch_lightning as pl + + +def make_layers(block): + layers = [] + for layer_name, v in block.items(): + if 'pool' in layer_name: + layer = nn.MaxPool2d(kernel_size=v[0], stride=v[1], padding=v[2]) + layers.append((layer_name, layer)) + elif 'deconv' in layer_name: + transposeConv2d = nn.ConvTranspose2d(in_channels=v[0], + out_channels=v[1], + kernel_size=v[2], + stride=v[3], + padding=v[4]) + layers.append((layer_name, transposeConv2d)) + if 'relu' in layer_name: + layers.append(('relu_' + layer_name, nn.ReLU(inplace=True))) + elif 'leaky' in layer_name: + layers.append(('leaky_' + layer_name, + nn.LeakyReLU(negative_slope=0.2, inplace=True))) + elif 'conv' in layer_name: + conv2d = nn.Conv2d(in_channels=v[0], + out_channels=v[1], + kernel_size=v[2], + stride=v[3], + padding=v[4]) + layers.append((layer_name, conv2d)) + if 'relu' in layer_name: + layers.append(('relu_' + layer_name, nn.ReLU(inplace=True))) + elif 'leaky' in layer_name: + layers.append(('leaky_' + layer_name, + nn.LeakyReLU(negative_slope=0.2, inplace=True))) + else: + raise NotImplementedError + return nn.Sequential(OrderedDict(layers)) + + +class CLSTM_cell(nn.Module): + """"""ConvLSTMCell + """""" + + def __init__(self, shape, input_channels, filter_size, num_features, seq_len=8, device='cpu'): + super(CLSTM_cell, self).__init__() + + self.shape = shape # H, W + self.input_channels = input_channels + self.filter_size = filter_size + self.device = device + self.num_features = num_features + # in this way the output has the same size + self.padding = (filter_size - 1) // 2 + self.conv = nn.Sequential( + nn.Conv2d(self.input_channels + self.num_features, + 4 * self.num_features, self.filter_size, 1, + self.padding), + nn.GroupNorm(4 * self.num_features // 32, 4 * self.num_features) # best for regression + ) + + self.seq_len = seq_len + + def forward(self, inputs=None, hidden_state=None): + if hidden_state is None: + hx = torch.zeros(inputs.size(1), self.num_features, self.shape[0], + self.shape[1]).to(self.device) + cx = torch.zeros(inputs.size(1), self.num_features, self.shape[0], + self.shape[1]).to(self.device) + else: + hx, cx = hidden_state + output_inner = [] + for index in range(self.seq_len): + if inputs is None: + x = torch.zeros(hx.size(0), self.input_channels, self.shape[0], + self.shape[1]).to(self.device) + else: + x = inputs[index, ...] 
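+
+            # (added note) one convolution yields all four gate pre-activations;
+            # the standard ConvLSTM update computed below is
+            #   i = sigmoid(W_i * [x, h]),  f = sigmoid(W_f * [x, h]),
+            #   g = tanh(W_g * [x, h]),     o = sigmoid(W_o * [x, h]),
+            #   c' = f * c + i * g,         h' = o * tanh(c')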
+ + combined = torch.cat((x, hx), 1) + gates = self.conv(combined) # gates: S, num_features*4, H, W + + # it should return 4 tensors: i,f,g,o + ingate, forgetgate, cellgate, outgate = torch.split( + gates, self.num_features, dim=1) + ingate = torch.sigmoid(ingate) + forgetgate = torch.sigmoid(forgetgate) + cellgate = torch.tanh(cellgate) + outgate = torch.sigmoid(outgate) + + cy = (forgetgate * cx) + (ingate * cellgate) + hy = outgate * torch.tanh(cy) + output_inner.append(hy) + hx = hy + cx = cy + return torch.stack(output_inner), (hy, cy) + + +def convlstm_encoder_params(in_chan=7, image_size=128, device='cpu'): + size_l1 = image_size + size_l2 = image_size - (image_size // 4) + size_l3 = image_size - (image_size // 2) + size_l4 = size_l1 - size_l2 + + convlstm_encoder_params = [ + [ + OrderedDict({'conv1_leaky_1': [in_chan, size_l4, 3, 1, 1]}), # [1, 32, 3, 1, 1] + OrderedDict({'conv2_leaky_1': [size_l3, size_l3, 3, 2, 1]}), + OrderedDict({'conv3_leaky_1': [size_l2, size_l2, 3, 2, 1]}), + ], + [ + CLSTM_cell(shape=(size_l1, size_l1), input_channels=size_l4, filter_size=5, num_features=size_l3, + seq_len=4, device=device), + CLSTM_cell(shape=(size_l3, size_l3), input_channels=size_l3, filter_size=5, num_features=size_l2, + seq_len=4, device=device), + CLSTM_cell(shape=(size_l4, size_l4), input_channels=size_l2, filter_size=5, num_features=size_l1, + seq_len=4, device=device) + ] + ] + return convlstm_encoder_params + + +def convlstm_decoder_params(seq_len, image_size=128, device='cpu'): + size_l1 = image_size + size_l2 = image_size - (image_size // 4) + size_l3 = image_size - (image_size // 2) + size_l4 = size_l1 - size_l2 + + convlstm_decoder_params = [ + [ + OrderedDict({'deconv1_leaky_1': [size_l1, size_l1, 4, 2, 1]}), + OrderedDict({'deconv2_leaky_1': [size_l2, size_l2, 4, 2, 1]}), + OrderedDict({ + 'conv3_leaky_1': [size_l3, size_l4, 3, 1, 1], + 'conv4_leaky_1': [size_l4, 1, 1, 1, 0] + }), + ], + [ + CLSTM_cell(shape=(size_l4, size_l4), input_channels=size_l1, filter_size=5, num_features=size_l1, + seq_len=4, device=device), + CLSTM_cell(shape=(size_l3, size_l3), input_channels=size_l1, filter_size=5, num_features=size_l2, + seq_len=4, device=device), + CLSTM_cell(shape=(size_l1, size_l1), input_channels=size_l2, filter_size=5, num_features=size_l3, + seq_len=4, device=device) + ] + ] + return convlstm_decoder_params + + +class Encoder(nn.Module): + def __init__(self, subnets, rnns): + super().__init__() + assert len(subnets) == len(rnns) + self.blocks = len(subnets) + + for index, (params, rnn) in enumerate(zip(subnets, rnns), 1): + # index sign from 1 + setattr(self, 'stage' + str(index), make_layers(params)) + setattr(self, 'rnn' + str(index), rnn) + + def forward_by_stage(self, inputs, subnet, rnn): + seq_number, batch_size, input_channel, height, width = inputs.size() + inputs = torch.reshape(inputs, (-1, input_channel, height, width)) + inputs = subnet(inputs) + inputs = torch.reshape(inputs, (seq_number, batch_size, inputs.size(1), + inputs.size(2), inputs.size(3))) + outputs_stage, state_stage = rnn(inputs, None) + return outputs_stage, state_stage + + def forward(self, inputs): + inputs = inputs.transpose(0, 1) # to S,B,1,64,64 + hidden_states = [] + logging.debug(inputs.size()) + for i in range(1, self.blocks + 1): + inputs, state_stage = self.forward_by_stage( + inputs, getattr(self, 'stage' + str(i)), + getattr(self, 'rnn' + str(i))) + hidden_states.append(state_stage) + return tuple(hidden_states) + + +class Decoder(nn.Module): + def __init__(self, subnets, rnns, 
seq_len): + super().__init__() + assert len(subnets) == len(rnns) + + self.blocks = len(subnets) + self.seq_len = seq_len + + for index, (params, rnn) in enumerate(zip(subnets, rnns)): + setattr(self, 'rnn' + str(self.blocks - index), rnn) + setattr(self, 'stage' + str(self.blocks - index), + make_layers(params)) + + def forward_by_stage(self, inputs, state, subnet, rnn): + inputs, state_stage = rnn(inputs, state) # , seq_len=8 + seq_number, batch_size, input_channel, height, width = inputs.size() + inputs = torch.reshape(inputs, (-1, input_channel, height, width)) + inputs = subnet(inputs) + inputs = torch.reshape(inputs, (seq_number, batch_size, inputs.size(1), + inputs.size(2), inputs.size(3))) + return inputs + + # input: 5D S*B*C*H*W + + def forward(self, hidden_states): + inputs = self.forward_by_stage(None, hidden_states[-1], + getattr(self, 'stage3'), + getattr(self, 'rnn3')) + for i in list(range(1, self.blocks))[::-1]: + inputs = self.forward_by_stage(inputs, hidden_states[i - 1], + getattr(self, 'stage' + str(i)), + getattr(self, 'rnn' + str(i))) + inputs = inputs.transpose(0, 1) # to B,S,1,64,64 + return inputs + + +class ConvLSTM_patch(nn.Module): + + def __init__(self, seq_len, in_chan=7, image_size=128, device='cpu'): + super(ConvLSTM_patch, self).__init__() + encoder_params = convlstm_encoder_params(in_chan, image_size, device=device) + decoder_params = convlstm_decoder_params(seq_len, image_size, device=device) + + self.encoder = Encoder(encoder_params[0], encoder_params[1]) + self.decoder = Decoder(decoder_params[0], decoder_params[1], seq_len=seq_len) + + def forward(self, x, future_seq=10): + x = x.permute(0, 1, 4, 2, 3) + state = self.encoder(x) + output = self.decoder(state) + + return output + + +class IrradianceNet(pl.LightningModule): + def __init__(self, model, opt_patience): + super().__init__() + self.model = model + self.opt_patience = opt_patience + + def forward(self, x): + x = x.permute(0, 2, 3, 4, 1) + y_pred1 = self.model(x).permute(0, 1, 3, 4, 2) + y_pred2 = self.model(y_pred1).permute(0, 1, 3, 4, 2) + y_pred = torch.concat((y_pred1, y_pred2), axis=1).permute(0, 4, 1, 2, 3) + return y_pred + + def _loss(self, batch): + x, y = batch + y_pred = self.forward(x) + return (y - y_pred).square().mean() + + def training_step(self, batch, batch_idx): + loss = self._loss(batch) + log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True} + self.log('train_loss', loss, **log_params) + return loss + + @torch.no_grad() + def val_test_step(self, batch, batch_idx, split=""val""): + loss = self._loss(batch) + log_params = {""on_step"": False, ""on_epoch"": True, ""prog_bar"": True, ""sync_dist"": True} + self.log(f""{split}_loss"", loss, **log_params) + + def validation_step(self, batch, batch_idx): + self.val_test_step(batch, batch_idx, split=""val"") + + def test_step(self, batch, batch_idx): + self.val_test_step(batch, batch_idx, split=""test"") + + def configure_optimizers(self): + optimizer = torch.optim.AdamW( + self.parameters(), lr=0.002, + betas=(0.5, 0.9), weight_decay=1e-3 + ) + reduce_lr = torch.optim.lr_scheduler.ReduceLROnPlateau( + optimizer, patience=self.opt_patience, factor=0.5, verbose=True + ) + + optimizer_spec = { + ""optimizer"": optimizer, + ""lr_scheduler"": { + ""scheduler"": reduce_lr, + ""monitor"": ""val_loss"", + ""frequency"": 1, + }, + } + return optimizer_spec +","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","Test/Test_IrrNet.py",".py","4168","103","from validation_utils import 
+print(sys.argv)
+start = int(sys.argv[1])
+end = int(sys.argv[2])
+test_name = sys.argv[3]
+model_config_path = sys.argv[4]
+model_path = sys.argv[5]
+
+def interpolate_yhat(yhat):
+    # Linearly blend across the 6-pixel seam (rows/columns 125:131) that is
+    # left where the four 128x128 patches are stitched into a 256x256 frame.
+    yhat = yhat.detach()
+    yhat[:, 125:131] = np.nan
+    yhat[:, :, 125:131] = np.nan
+
+    # rows
+    for t in range(yhat.shape[0]):
+        row_start_vals = yhat[t][124]
+        row_end_vals = yhat[t][131]
+        diff_interpolate = (row_start_vals - row_end_vals) / 7
+        diff_interpolate = diff_interpolate.unsqueeze(0)
+        diff_interpolate = diff_interpolate.repeat(6, 1)
+        vals = np.arange(1, 7)
+        vals = vals[np.newaxis, :]
+        vals = np.repeat(vals, diff_interpolate.shape[1], axis=0)
+
+        interpol_values = diff_interpolate.detach() * vals.T
+        interpol_values = row_start_vals.unsqueeze(0).repeat(6, 1) - interpol_values
+        yhat[t, 125:131] = interpol_values
+
+        # columns
+        col_start_vals = yhat[t][:, 124]
+        col_end_vals = yhat[t][:, 131]
+        diff_interpolate = (col_start_vals - col_end_vals) / 7
+        diff_interpolate = diff_interpolate.unsqueeze(0)
+        diff_interpolate = diff_interpolate.repeat(6, 1)
+        vals = np.arange(1, 7)
+        vals = vals[np.newaxis, :]
+        vals = np.repeat(vals, diff_interpolate.shape[1], axis=0)
+
+        interpol_values = diff_interpolate.detach() * vals.T
+        interpol_values = col_start_vals.unsqueeze(0).repeat(6, 1) - interpol_values
+        yhat[t, :, 125:131] = interpol_values.T
+    return yhat
+
+def main():
+    nowcast_net = ConvLSTM_patch(in_chan=1, image_size=128, device='cuda', seq_len=8)
+    irradiance_net = IrradianceNet(nowcast_net,
+                                   opt_patience=5).to('cuda')
+
+    checkpoint = torch.load(model_path)
+    irradiance_net.load_state_dict(checkpoint['state_dict'])
+    model_config = open_pkl(model_config_path)
+    model_id = model_config['ID']
+
+    with open('/scratch/snx3000/acarpent/Test_Results/{}/config.yml'.format(test_name),
+              'r') as o:
+        test_config = load(o, Loader)
+
+    data_path='/scratch/snx3000/acarpent/HelioMontDataset/{}/KI/'.format(test_config['dataset_name'])
+    n_ens = test_config['n_ens']
+    ddim_steps = test_config['ddim_steps']
+    x_max = test_config['x_max']
+    x_min = test_config['x_min']
+    y_max = test_config['y_max']
+    y_min = test_config['y_min']
+    patches_idx = test_config['patches_idx']
+
+    date_idx_dict = open_pkl('/scratch/snx3000/acarpent/Test_Results/{}/Test_date_idx.pkl'.format(test_name))
+    test_days = list(date_idx_dict.keys())
+    print(test_days)
+    forecast_dict = {}
+
+    for date in test_days[start:end]:
+        full_maps, idx_lst, t = get_full_images(date, data_path=data_path, patches_idx=patches_idx)
+        # idx = np.random.choice(list(idx_lst), replace=False, size=n_per_day)
+        idx = date_idx_dict[date]
+        print(date)
+        for i in idx:
+            x = torch.Tensor(full_maps[i:i+4, y_min:y_max, x_min:x_max])
+            y = torch.Tensor(full_maps[i+4:i+12, y_min:y_max, x_min:x_max])
+            x, y = x.reshape(1,1,*x.shape).to('cuda'), y.reshape(1,1,*y.shape).to('cuda')
+
+            # predict each 128x128 quadrant separately, then stitch and blend
+            yhat = torch.zeros((8, 256, 256))
+            for x_i in [0, 128]:
+                for y_i in [0, 128]:
+                    yhat[:, x_i:x_i+128, y_i:y_i+128] = irradiance_net(x[:, :, :, x_i:x_i+128, y_i:y_i+128]).detach()
+
+            yhat = interpolate_yhat(yhat)
+            yhat[yhat<-1] = -1
+            yhat[yhat>1] = 1
+            forecast_dict[t[i]] = yhat.numpy().astype(np.float32)
+        save_pkl('/scratch/snx3000/acarpent/Test_Results/{}/{}-forecast_dict_{}.pkl'.format(test_name,
model_id, date), forecast_dict) + forecast_dict = {} + print('###################################### SAVED ######################################') + +if __name__ == '__main__': + main()","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","Test/Test_SHADECast.py",".py","3159","77","from validation_utils import get_diffusion_model +from SHADECast.Models.Sampler.PLMS import PLMSSampler +import numpy as np +from utils import save_pkl, open_pkl, get_full_images, get_full_coordinates, remap +import torch +import os +import sys +from yaml import load, Loader + +print(sys.argv) +start = int(sys.argv[1]) +end = int(sys.argv[2]) +test_name = sys.argv[3] +model_config_path = sys.argv[4] +model_path = sys.argv[5] + +def main(): + ldm, model_config = get_diffusion_model(model_config_path, + model_path) + model_id = model_config['ID'] + + with open('/scratch/snx3000/acarpent/Test_Results/{}/config.yml'.format(test_name), + 'r') as o: + test_config = load(o, Loader) + + data_path='/scratch/snx3000/acarpent/HelioMontDataset/{}/KI/'.format(test_config['dataset_name']) + n_ens = test_config['n_ens'] + ddim_steps = test_config['ddim_steps'] + x_max = test_config['x_max'] + x_min = test_config['x_min'] + y_max = test_config['y_max'] + y_min = test_config['y_min'] + patches_idx = test_config['patches_idx'] + + date_idx_dict = open_pkl('/scratch/snx3000/acarpent/Test_Results/{}/Test_date_idx.pkl'.format(test_name)) + test_days = list(date_idx_dict.keys()) + print(test_days) + ldm = ldm.to('cuda') + sampler = PLMSSampler(ldm, verbose=False) + forecast_dict = {} + + print('Testing started') + + for date in test_days[start:end]: + full_maps, idx_lst, t = get_full_images(date, data_path=data_path, patches_idx=patches_idx) + # idx = np.random.choice(list(idx_lst), replace=False, size=n_per_day) + idx = date_idx_dict[date] + print(date) + for i in idx: + x = torch.Tensor(full_maps[i:i+4, y_min:y_max, x_min:x_max]) + y = torch.Tensor(full_maps[i+4:i+12, y_min:y_max, x_min:x_max]) + x,y = x.reshape(1,1,*x.shape).to('cuda'), y.reshape(1,1,*y.shape).to('cuda') + # enc_x, _ = ldm.autoencoder.encode(x) + enc_y, _ = ldm.autoencoder.encode(y) + x = torch.cat([x for _ in range(n_ens)]).to('cuda') + cond = ldm.context_encoder(x) + samples_ddim, _ = sampler.sample(S=ddim_steps, + conditioning=cond, + batch_size=n_ens, + shape=tuple(enc_y.shape[1:]), + verbose=False, + eta=0.) 
+ + yhat = ldm.autoencoder.decode(samples_ddim.to('cuda')) + yhat = yhat.to('cpu').detach().numpy()[:,0] + yhat[yhat<-1] = -1 + yhat[yhat>1] = 1 + print(yhat.shape) + # ens_members.append(yhat_) + + forecast_dict[t[i]] = np.array(yhat).astype(np.float32) + save_pkl('/scratch/snx3000/acarpent/Test_Results/{}/{}_{}-ddim_forecast_dict_{}.pkl'.format(test_name, model_id, ddim_steps, date), forecast_dict) + forecast_dict = {} + print('###################################### SAVED ######################################') + +if __name__ == '__main__': + main()","Python" +"Nowcasting","EnergyWeatherAI/GenerativeNowcasting","Dataset/dataset.py",".py","3810","99","from utils import open_pkl, save_pkl +from torch.utils.data import Dataset +import pytorch_lightning as pl +import torch +import os +import numpy as np +from torch.nn import AvgPool3d + + +class KIDataset(Dataset): + def __init__(self, + data_path, + coordinate_data_path, + n, + length=None, + return_all=False, + forecast=False, + validation=False, + return_t=False, + **kwargs): + super().__init__() + self.data_path = data_path + self.coordinate_data_path = coordinate_data_path + self.return_all = return_all + self.forecast = forecast + self.validation = validation + self.return_t = return_t + f = os.listdir(self.data_path) + self.filenames = [] + self.n = n + if length is None: + self.filenames += f + else: + while length > len(self.filenames): + self.filenames += f + self.filenames = self.filenames[:length] + self.nitems = len(self.filenames) + if self.validation: + np.random.seed(0) + self.seeds = np.random.randint(0, 1000000, self.nitems) + if self.return_t: + self.t_lst = np.random.randint(0, 1000, self.nitems) + self.norm_method = kwargs['norm_method'] if 'norm_method' in kwargs else 'rescaling' + if self.norm_method == 'normalization': + self.a, self.b = kwargs['mean'], kwargs['std'] + elif self.norm_method == 'rescaling': + self.a, self.b = kwargs['min'], kwargs['max'] + + def to_tensor(self, x): + return torch.FloatTensor(x) + + def __getitem__(self, idx): + item_idx = self.filenames[idx] + if self.validation: + seed = self.seeds[idx] + np.random.seed(seed) + if self.return_t: + t = int(self.t_lst[idx]) + coord_idx = int(item_idx.split('_')[1].split('.')[0]) + item_dict = open_pkl(self.data_path + item_idx) + + starting_idx = np.random.choice(item_dict['starting_idx'], 1, replace=False)[0] + if self.validation: + print(idx, starting_idx) + seq = np.array(item_dict['ki_maps'])[starting_idx:starting_idx + self.n] + seq = seq.reshape(1, *seq.shape) + + if self.norm_method == 'normalization': + seq = (seq - self.a) / self.b + elif self.norm_method == 'rescaling': + seq = 2 * ((seq - self.a) / (self.b - self.a)) - 1 + + if self.return_all: + lon = np.array(open_pkl(self.coordinate_data_path + '{}_lon.pkl'.format(coord_idx))) + lat = np.array(open_pkl(self.coordinate_data_path + '{}_lat.pkl'.format(coord_idx))) + alt = np.array(open_pkl(self.coordinate_data_path + '{}_alt.pkl'.format(coord_idx))) + lon = 2 * ((lon - 0) / (90 - 0)) - 1 + lat = 2 * ((lat - 0) / (90 - 0)) - 1 + alt = 2 * ((alt - (-13)) / (4294 - 0)) - 1 + lon = lon.reshape(1, 1, *lon.shape) + lat = lat.reshape(1, 1, *lat.shape) + alt = alt.reshape(1, 1, *alt.shape) + c = np.concatenate((alt, lon, lat), axis=0) + if self.forecast: + return self.to_tensor(seq[:, :4]), self.to_tensor(seq[:, 4:]), self.to_tensor(c) + + else: + return self.to_tensor(seq), self.to_tensor(c) + else: + if self.forecast: + if self.return_t: + return self.to_tensor(seq[:, :4]), self.to_tensor(seq[:, 
4:]), t + else: + return self.to_tensor(seq[:, :4]), self.to_tensor(seq[:, 4:]) + else: + return self.to_tensor(seq) + + def __len__(self): + return self.nitems","Python" +"Nowcasting","bugsuse/radar_nowcasting","models.py",".py","9933","255","import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SmaAt_UNet(nn.Module): + def __init__(self, n_channels, n_classes, kernels_per_layer=2, bilinear=True, reduction_ratio=16): + super(SmaAt_UNet, self).__init__() + self.n_channels = n_channels + self.n_classes = n_classes + kernels_per_layer = kernels_per_layer + self.bilinear = bilinear + reduction_ratio = reduction_ratio + + self.inc = DoubleConvDS(self.n_channels, 64, kernels_per_layer=kernels_per_layer) + self.cbam1 = CBAM(64, reduction_ratio=reduction_ratio) + self.down1 = DownDS(64, 128, kernels_per_layer=kernels_per_layer) + self.cbam2 = CBAM(128, reduction_ratio=reduction_ratio) + self.down2 = DownDS(128, 256, kernels_per_layer=kernels_per_layer) + self.cbam3 = CBAM(256, reduction_ratio=reduction_ratio) + self.down3 = DownDS(256, 512, kernels_per_layer=kernels_per_layer) + self.cbam4 = CBAM(512, reduction_ratio=reduction_ratio) + factor = 2 if self.bilinear else 1 + self.down4 = DownDS(512, 1024 // factor, kernels_per_layer=kernels_per_layer) + self.cbam5 = CBAM(1024 // factor, reduction_ratio=reduction_ratio) + self.up1 = UpDS(1024, 512 // factor, self.bilinear, kernels_per_layer=kernels_per_layer) + self.up2 = UpDS(512, 256 // factor, self.bilinear, kernels_per_layer=kernels_per_layer) + self.up3 = UpDS(256, 128 // factor, self.bilinear, kernels_per_layer=kernels_per_layer) + self.up4 = UpDS(128, 64, self.bilinear, kernels_per_layer=kernels_per_layer) + + self.outc = OutConv(64, self.n_classes) + + def forward(self, x): + x1 = self.inc(x) + x1Att = self.cbam1(x1) + x2 = self.down1(x1) + x2Att = self.cbam2(x2) + x3 = self.down2(x2) + x3Att = self.cbam3(x3) + x4 = self.down3(x3) + x4Att = self.cbam4(x4) + x5 = self.down4(x4) + x5Att = self.cbam5(x5) + x = self.up1(x5Att, x4Att) + x = self.up2(x, x3Att) + x = self.up3(x, x2Att) + x = self.up4(x, x1Att) + logits = self.outc(x) + return logits + + +class DepthToSpace(nn.Module): + + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, self.bs, self.bs, C // (self.bs ** 2), H, W) # (N, bs, bs, C//bs^2, H, W) + x = x.permute(0, 3, 4, 1, 5, 2).contiguous() # (N, C//bs^2, H, bs, W, bs) + x = x.view(N, C // (self.bs ** 2), H * self.bs, W * self.bs) # (N, C//bs^2, H * bs, W * bs) + return x + + +class SpaceToDepth(nn.Module): + # Expects the following shape: Batch, Channel, Height, Width + def __init__(self, block_size): + super().__init__() + self.bs = block_size + + def forward(self, x): + N, C, H, W = x.size() + x = x.view(N, C, H // self.bs, self.bs, W // self.bs, self.bs) # (N, C, H//bs, bs, W//bs, bs) + x = x.permute(0, 3, 5, 1, 2, 4).contiguous() # (N, bs, bs, C, H//bs, W//bs) + x = x.view(N, C * (self.bs ** 2), H // self.bs, W // self.bs) # (N, C*bs^2, H//bs, W//bs) + return x + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, in_channels, output_channels, kernel_size, padding=0, kernels_per_layer=1): + super(DepthwiseSeparableConv, self).__init__() + # In Tensorflow DepthwiseConv2D has depth_multiplier instead of kernels_per_layer + self.depthwise = nn.Conv2d(in_channels, in_channels * kernels_per_layer, kernel_size=kernel_size, padding=padding, + groups=in_channels) + self.pointwise = nn.Conv2d(in_channels * 
kernels_per_layer, output_channels, kernel_size=1) + + def forward(self, x): + x = self.depthwise(x) + x = self.pointwise(x) + return x + + +class DoubleDense(nn.Module): + def __init__(self, in_channels, hidden_neurons, output_channels): + super(DoubleDense, self).__init__() + self.dense1 = nn.Linear(in_channels, out_features=hidden_neurons) + self.dense2 = nn.Linear(in_features=hidden_neurons, out_features=hidden_neurons // 2) + self.dense3 = nn.Linear(in_features=hidden_neurons // 2, out_features=output_channels) + + def forward(self, x): + out = F.relu(self.dense1(x.view(x.size(0), -1))) + out = F.relu(self.dense2(out)) + out = self.dense3(out) + return out + + +class DoubleDSConv(nn.Module): + """"""(convolution => [BN] => ReLU) * 2"""""" + def __init__(self, in_channels, out_channels): + super().__init__() + self.double_ds_conv = nn.Sequential( + DepthwiseSeparableConv(in_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True), + DepthwiseSeparableConv(out_channels, out_channels, kernel_size=3, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + return self.double_ds_conv(x) + + +class Flatten(nn.Module): + def forward(self, x): + return x.view(x.size(0), -1) + + +class ChannelAttention(nn.Module): + def __init__(self, input_channels, reduction_ratio=16): + super(ChannelAttention, self).__init__() + self.input_channels = input_channels + self.avg_pool = nn.AdaptiveAvgPool2d(1) + self.max_pool = nn.AdaptiveMaxPool2d(1) + # https://github.com/luuuyi/CBAM.PyTorch/blob/master/model/resnet_cbam.py + # uses Convolutions instead of Linear + self.MLP = nn.Sequential( + Flatten(), + nn.Linear(input_channels, input_channels // reduction_ratio), + nn.ReLU(), + nn.Linear(input_channels // reduction_ratio, input_channels) + ) + + def forward(self, x): + # Take the input and apply average and max pooling + avg_values = self.avg_pool(x) + max_values = self.max_pool(x) + out = self.MLP(avg_values) + self.MLP(max_values) + scale = x * torch.sigmoid(out).unsqueeze(2).unsqueeze(3).expand_as(x) + return scale + + +class SpatialAttention(nn.Module): + def __init__(self, kernel_size=7): + super(SpatialAttention, self).__init__() + assert kernel_size in (3, 7), 'kernel size must be 3 or 7' + padding = 3 if kernel_size == 7 else 1 + self.conv = nn.Conv2d(2, 1, kernel_size=kernel_size, padding=padding, bias=False) + self.bn = nn.BatchNorm2d(1) + + def forward(self, x): + avg_out = torch.mean(x, dim=1, keepdim=True) + max_out, _ = torch.max(x, dim=1, keepdim=True) + out = torch.cat([avg_out, max_out], dim=1) + out = self.conv(out) + out = self.bn(out) + scale = x * torch.sigmoid(out) + return scale + + +class CBAM(nn.Module): + def __init__(self, input_channels, reduction_ratio=16, kernel_size=7): + super(CBAM, self).__init__() + self.channel_att = ChannelAttention(input_channels, reduction_ratio=reduction_ratio) + self.spatial_att = SpatialAttention(kernel_size=kernel_size) + + def forward(self, x): + out = self.channel_att(x) + out = self.spatial_att(out) + return out + +class OutConv(nn.Module): + def __init__(self, in_channels, out_channels): + super(OutConv, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1) + + def forward(self, x): + return self.conv(x) + + +class DoubleConvDS(nn.Module): + """"""(convolution => [BN] => ReLU) * 2"""""" + + def __init__(self, in_channels, out_channels, mid_channels=None, kernels_per_layer=1): + super().__init__() + if not mid_channels: + 
mid_channels = out_channels + + self.double_conv = nn.Sequential( + DepthwiseSeparableConv(in_channels, mid_channels, kernel_size=3, kernels_per_layer=kernels_per_layer, padding=1), + nn.BatchNorm2d(mid_channels), + nn.ReLU(inplace=True), + DepthwiseSeparableConv(mid_channels, out_channels, kernel_size=3, kernels_per_layer=kernels_per_layer, padding=1), + nn.BatchNorm2d(out_channels), + nn.ReLU(inplace=True) + ) + + def forward(self, x): + return self.double_conv(x) + + +class DownDS(nn.Module): + """"""Downscaling with maxpool then double conv"""""" + + def __init__(self, in_channels, out_channels, kernels_per_layer=1): + super().__init__() + self.maxpool_conv = nn.Sequential( + nn.MaxPool2d(2), + DoubleConvDS(in_channels, out_channels, kernels_per_layer=kernels_per_layer) + ) + + def forward(self, x): + return self.maxpool_conv(x) + + +class UpDS(nn.Module): + """"""Upscaling then double conv"""""" + + def __init__(self, in_channels, out_channels, bilinear=True, kernels_per_layer=1): + super().__init__() + + # if bilinear, use the normal convolutions to reduce the number of channels + if bilinear: + self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) + self.conv = DoubleConvDS(in_channels, out_channels, in_channels // 2, kernels_per_layer=kernels_per_layer) + else: + self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2) + self.conv = DoubleConvDS(in_channels, out_channels, kernels_per_layer=kernels_per_layer) + + def forward(self, x1, x2): + x1 = self.up(x1) + # input is CHW + diffY = x2.size()[2] - x1.size()[2] + diffX = x2.size()[3] - x1.size()[3] + + x1 = F.pad(x1, [diffX // 2, diffX - diffX // 2, + diffY // 2, diffY - diffY // 2]) + # if you have padding issues, see + # https://github.com/HaiyongJiang/U-Net-Pytorch-Unstructured-Buggy/commit/0e854509c2cea854e247a9c615f175f76fbb2e3a + # https://github.com/xiaopeng-liao/Pytorch-UNet/commit/8ebac70e633bac59fc22bb5195e513d5832fb3bd + x = torch.cat([x2, x1], dim=1) + return self.conv(x) + +","Python" +"Nowcasting","bugsuse/radar_nowcasting","dataset.py",".py","2439","61","import os +from glob import glob +import numpy as np +from PIL import Image +import torch +from torch.utils.data.dataset import Dataset + + +class RadarSets(Dataset): + def __init__(self, pics, img_size, mode='train'): + super(RadarSets, self).__init__() + self.pics = pics + self.mode = mode + self.height, self.width = img_size + self.train_path = f'{os.sep}'.join([i for i in pics[0].split(os.sep)[:-2]]) + + def __getitem__(self, index): + if self.mode not in ['test']: + mode = 'train' + else: + mode = 'test' + + inputs = [] + input_fn = os.path.join(self.train_path, 'examples', + f""{self.pics[index].split('/')[-1]}"", + f""{self.pics[index].split('/')[-1]}-inputs-{mode}.txt"") + input_img = np.loadtxt(input_fn, dtype=str) + inp_len = len(input_img) + + for i in range(0, inp_len): + img = Image.open(os.path.join(self.train_path, 'data', input_img[i])) + img = np.pad(img, ((10, 10), (10, 10)), 'constant', constant_values = (0, 0)) + img = np.array(Image.fromarray(img).resize((self.height, self.width))) + img = torch.from_numpy(img.astype(np.float32)) + inputs.append(img) + + if self.mode in ['train', 'valid']: + target_fn = os.path.join(self.train_path, 'examples', + f""{self.pics[index].split('/')[-1]}"", + f""{self.pics[index].split('/')[-1]}-targets-train.txt"") + target_img = np.loadtxt(target_fn, dtype=str) + tar_len = len(target_img) + + targets = [] + for i in range(0, tar_len): + img = 
Image.open(os.path.join(self.train_path, 'data', target_img[i]))
+                img = np.pad(img, ((10, 10), (10, 10)), 'constant', constant_values=(0, 0))
+                img = np.array(Image.fromarray(img).resize((self.height, self.width)))
+                img = torch.from_numpy(img.astype(np.float32))
+                targets.append(img)
+
+            return torch.stack(inputs, dim=0)/255, torch.stack(targets, dim=0)/255
+        elif self.mode in ['test']:
+            return torch.stack(inputs, dim=0)/255
+        else:
+            raise ValueError(f'{self.mode} is unknown and should be among train, valid and test!')
+
+    def __len__(self):
+        return len(self.pics)
+
+","Python"
+"Nowcasting","bugsuse/radar_nowcasting","inference.py",".py","2243","72","import os
+import sys
+import random
+import numpy as np
+from glob import glob
+import torch
+from torch.utils.data import DataLoader
+
+from dataset import RadarSets
+from models import SmaAt_UNet
+from utils import check_dir, save_pred, array2img
+
+
+if __name__ == '__main__':
+
+    ckpts = 'ckpts/epoch_49_valid_4.356657_ckpt.pth'
+    save_dir = './infer'
+    device = 'cuda:0'
+    test_path = '/data/ml/test/examples/'
+    img_height = 256
+    img_width = 256
+    in_length = 10
+    out_length = 10
+    batch_size = 16
+    num_workers = 12
+    pin_memory = True
+    matplot = True
+
+    visual = {'norm_max': 80, 'cmap': 'NWSRef'}
+
+    check_dir(save_dir)
+
+    print('loading dataset for inference...')
+    # load dataset
+    test_path = np.array(glob(os.path.join(test_path, '*')))
+    test_sets = RadarSets(test_path, (img_height, img_width), mode='test')
+    test_loader = DataLoader(test_sets,
+                             batch_size=batch_size,
+                             num_workers=num_workers,
+                             pin_memory=pin_memory,
+                             )
+
+    ckpts = torch.load(ckpts)['model_state_dict']
+    model = SmaAt_UNet(in_length, out_length).to(device)
+    model.load_state_dict(ckpts)
+    model.eval()
+
+    with torch.no_grad():
+        num = 0
+        for input_ in test_loader:
+            input_ = input_.to(device)
+            # the last batch can be smaller than batch_size, so use its true size
+            input_ = input_.reshape(input_.size(0), -1, img_height, img_width)
+            pred = model(input_)
+
+            print(f'save prediction to {save_dir}...')
+            # one output directory per sample: save_pred writes all lead times
+            # at once; otherwise each frame is rendered via array2img
+            for b in range(pred.shape[0]):
+                check_dir(save_dir + os.sep + f'{num+1:05d}')
+                save_path = save_dir + f'{os.sep}{num+1:05d}{os.sep}'
+
+                if matplot:
+                    print(pred[b, :].detach().cpu().numpy().max())
+                    save_pred(pred[b, :].detach().cpu().numpy() * visual['norm_max'], save_path)
+                else:
+                    for t in range(pred.shape[1]):
+                        img = array2img(pred[b, t].detach().cpu().numpy() * visual['norm_max'], visual['cmap'])
+                        img.save(save_path + f'{t:02d}.png')
+                num += 1
+
+","Python"
+"Nowcasting","bugsuse/radar_nowcasting","utils.py",".py","4803","154","import os
+import numpy as np
+from PIL import Image
+import matplotlib as mpl
+mpl.use('Agg')
+import matplotlib.pyplot as plt
+from matplotlib import cm, colors
+
+
+def check_dir(fn):
+    if not os.path.exists(fn):
+        os.makedirs(fn)
+
+
+def get_learning_rate(optimizer):
+    lr = []
+    for param_group in optimizer.param_groups:
+        lr += [param_group['lr']]
+    assert len(lr) == 1
+
+    return lr[0]
+
+
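+# adjust_colorbar builds a dedicated colorbar axes next to the given axes (or
+# grid of axes): it lets matplotlib propose a position first, then shrinks and
+# offsets that position so the bar lines up with the image panels.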
+def adjust_colorbar(fig, axes, pad=0.02, width=0.02, shrink=0.8, xscale=0.95):
+    from mpl_toolkits.axes_grid1 import make_axes_locatable
+
+    if isinstance(axes, np.ndarray):
+        cax, kw = mpl.colorbar.make_axes([ax for ax in axes.flat])
+
+        fig.canvas.draw()
+        pos = cax.get_position()
+    elif isinstance(axes, mpl.axes.Axes):
+        divider = make_axes_locatable(axes)
+        cax = divider.append_axes('right', size='5%', pad='3%')
+        kw = {'orientation': 'vertical', 'ticklocation': 'right'}
+
+        fig.canvas.draw()
+        pos = axes.get_position()
+    else:
+        raise TypeError(f'Unsupported axes type {type(axes)}!')
+
+    ydf = (1-shrink)*(pos.ymax - pos.ymin)/2
+    cax.remove()
+
+    return fig.add_axes([pos.xmax*xscale+pad, pos.ymin+ydf, width, (pos.ymax-pos.ymin)-2*ydf]), kw
+
+
+def array2img(array, cmap, vmin=0, vmax=70, rgb=False):
+    """"""
+    :param array(np.array): 2D array to render
+    :param cmap(str, list or np.array): colormap name, or a list of colors
+    :param vmin(int): vmin for normalize
+    :param vmax(int): vmax for normalize
+    :param rgb(bool): if True, convert the image to RGB format
+    Ref:
+    - https://stackoverflow.com/questions/10965417/how-to-convert-a-numpy-array-to-pil-image-applying-matplotlib-colormap
+    """"""
+    norm = colors.Normalize(vmin=vmin, vmax=vmax)
+
+    if isinstance(cmap, str):
+        cms = cm.get_cmap(cmap)
+    elif isinstance(cmap, list) or isinstance(cmap, np.ndarray):
+        cms = colors.LinearSegmentedColormap.from_list('cmap', cmap)
+    else:
+        raise ValueError(f'Unknown type {type(cmap)}.')
+
+    if rgb:
+        return Image.fromarray(np.uint8(cms(norm(array))*255)).convert('RGB')
+    else:
+        return Image.fromarray(np.uint8(cms(norm(array))*255))
+
+
+def save_pred(pred, save_path, configs=None):
+    """"""
+    :param pred(np.array): prediction with shape (time, height, width)
+    :param save_path(str): the path to save prediction
+    """"""
+    if pred.ndim != 3:
+        raise ValueError(f'The dimension of prediction should be 3, not {pred.ndim}!')
+
+    for ti in range(pred.shape[0]):
+        fig, axes = plt.subplots(figsize=(9, 6))
+        fig, axes = single_plot(pred[ti], ts=ti, fig=fig, axes=axes, title=True)
+        fig.savefig(f'{save_path}{os.sep}{ti:02d}.png', dpi=100, bbox_inches='tight')
+
+
+def single_plot(pred, target=None, ts=0, fig=None, axes=None, title=True):
+    import seaborn as sns
+
+    try:
+        import pyart
+        cmap = 'pyart_NWSRef'
+    except ImportError:
+        # fall back to a hand-picked reflectivity palette if pyart is missing
+        colev = [""#99DBEA"", ""#52A5D1"", ""#3753AD"", ""#80C505"", ""#52C10D"",
+                 ""#35972A"", ""#FAE33B"", ""#EAB81E"", ""#F78C2C"", ""#E2331F"",
+                 ""#992B27"", ""#471713"", ""#BC5CC2"", ""#975CC0""]
+
+        cmap = colors.ListedColormap(colev)
+
+    sns.set_context('talk', font_scale=0.8)
+
+    if target is not None:
+        if fig is None and axes is None:
+            fig, axes = plt.subplots(figsize=(12, 9), nrows=1, ncols=2)
+            plt.subplots_adjust(wspace=0.05, hspace=0.05)
+
+        im1 = axes[0].imshow(target, vmin=0, vmax=70, cmap=cmap)
+        im2 = axes[1].imshow(pred, vmin=0, vmax=70, cmap=cmap)
+
+        for i in range(axes.size):
+            axes[i].set_axis_off()
+
+        if title:
+            axes[0].set_title(f'Target {ts}')
+            axes[1].set_title(f'Prediction {ts}')
+
+        cax, kw = adjust_colorbar(fig, axes, shrink=0.53, xscale=0.91)
+
+        cb = fig.colorbar(im2, cax=cax, **kw)
+        cb.set_ticks(np.arange(0, 71, 10))
+        #cb.set_ticklabels(np.arange(0, 71, 10))
+        cb.ax.tick_params(direction='in', left=True, right=True, length=3,
+                          axis='both', which='major', labelsize=14)
+    else:
+        fig, axes = plt.subplots(figsize=(9, 6))
+        im1 = axes.imshow(pred, vmin=0, vmax=70, cmap=cmap)
+
+        axes.set_axis_off()
+
+        fig.colorbar(im1)
+
+    return fig, axes
+
+
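+# plot_compare writes one side-by-side target/prediction panel per lead time,
+# reusing single_plot above; main.py calls it after every epoch to render a
+# random validation sample.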
+def plot_compare(targets, prec, epoch=None, save_path=None):
+    if epoch is None:
+        epoch = 'default'
+    if save_path is None:
+        save_path = '.'
+
+    for ti in range(targets.shape[0]):
+        fig, axes = plt.subplots(figsize=(12, 9), nrows=1, ncols=2)
+        plt.subplots_adjust(wspace=0.05, hspace=0.05)
+
+        fig, axes = single_plot(prec[ti], target=targets[ti], ts=ti, fig=fig, axes=axes, title=True)
+
+        fig.savefig(f'{save_path}{os.sep}radar_{epoch}_time{ti:02d}.png', dpi=100, bbox_inches='tight')
+
+    return fig, axes
+
+","Python"
+"Nowcasting","bugsuse/radar_nowcasting","main.py",".py","5129","143","import os
+import random
+import logging
+from tqdm import tqdm
+from glob import glob
+import numpy as np
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.optim import lr_scheduler
+from torch.utils.data import DataLoader
+from tensorboardX import SummaryWriter
+
+from dataset import RadarSets
+from models import SmaAt_UNet
+from utils import check_dir, get_learning_rate, plot_compare
+
+
+if __name__ == '__main__':
+
+    LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
+    DATE_FORMAT = '%m/%d/%Y %H:%M:%S'
+
+    data_dir = '/data/ml/train/examples/'
+    log_dir = './log'
+    ckpt_dir = './ckpts'
+    pred_dir = './pred'
+
+    check_dir(log_dir)
+    check_dir(ckpt_dir)
+    check_dir(pred_dir)
+
+    logging.basicConfig(filename=os.path.join(log_dir, 'smaat_unet.log'),
+                        level=logging.INFO,
+                        format=LOG_FORMAT,
+                        datefmt=DATE_FORMAT
+                        )
+
+    logging.info('setting parameters and building model...')
+    writer = SummaryWriter(logdir=log_dir)
+
+    ## general parameter settings
+    device = 'cuda:0'
+    height = 256
+    width = 256
+    in_length = 10
+    out_length = 10
+    bs = 16
+    lr = 0.001
+    num_epochs = 50
+    train_ratio = 0.8
+    path = np.array(glob(os.path.join(data_dir, '*')))
+    nums = len(path)
+    tnums = int(nums*train_ratio)
+    values = np.array(range(nums))
+
+    ## resume model training
+    resume = None #'ckpts/epoch_49_valid_4.356657_ckpt.pth'
+
+    ## training/validation set split
+    random.shuffle(values)
+    path = path[values]
+    train_path = path[:tnums]
+    valid_path = path[tnums:]
+
+    train_sets = RadarSets(train_path, (height, width), mode='train')
+    valid_sets = RadarSets(valid_path, (height, width), mode='valid')
+    train_loader = DataLoader(train_sets, batch_size=bs, num_workers=12,
+                              pin_memory=True, shuffle=True, drop_last=True
+                              )
+    valid_loader = DataLoader(valid_sets, batch_size=bs, num_workers=12,
+                              pin_memory=True, shuffle=False, drop_last=True
+                              )
+
+    ## model, loss function and optimizer settings
+    model = SmaAt_UNet(in_length, out_length).to(device)
+    loss_func = lambda pred, obs: (F.l1_loss(pred, obs, reduction='mean') + F.mse_loss(pred, obs, reduction='mean'))
+    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
+    scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min',
+                                               factor=0.9, patience=3,
+                                               min_lr=0.000001, eps=0.000001, verbose=True
+                                               )
+    if resume is not None:
+        ckpts = torch.load(resume)
+
+        model.load_state_dict(ckpts['model_state_dict'])
+        start_epoch = ckpts['epoch']
+        epoch_start = start_epoch + 1
+        epoch_end = start_epoch + num_epochs
+        optimizer.load_state_dict(ckpts['optimizer_state_dict'])
+        # the loss is a stateless lambda, so there is no criterion state to restore
+    else:
+        epoch_start = 0
+        epoch_end = num_epochs
+
+    for epoch in range(epoch_start, epoch_end):
+        train_loss = 0
+        for inputs, targets in tqdm(train_loader):
+            optimizer.zero_grad()
+            model.train()
+            inputs, targets = inputs.cuda(), targets.cuda()
+            enc_preds = model(inputs)
+            loss = loss_func(enc_preds, targets)
+
+            loss.backward()
+            optimizer.step()
+            train_loss += loss.item()
+
+        lr_rate = get_learning_rate(optimizer)
+
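+        # validation pass (gradient-free); the scheduler step and the
+        # checkpoint filename below both key off the summed validation loss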
+        model.eval()
+        with torch.no_grad():
+            valid_loss = 0
+            for inputs, targets in tqdm(valid_loader):
+                inputs, targets = inputs.cuda(), targets.cuda()
+                enc_preds = model(inputs)
+                valid_loss += loss_func(enc_preds, targets).item()
+
+        writer.add_scalar('train_loss', train_loss, epoch)
+        writer.add_scalar('valid_loss', valid_loss, epoch)
+        logging.info(f'Epoch: {epoch+1}/{num_epochs}, lr: {lr_rate:.6f}, train loss: {train_loss:.6f}, valid loss: {valid_loss:.6f}')
+
+        scheduler.step(valid_loss)
+
+        checkpoint = {'model_state_dict': model.state_dict(),
+                      'optimizer_state_dict': optimizer.state_dict(),
+                      'epoch': epoch}
+        torch.save(checkpoint, f'{ckpt_dir}/epoch_{epoch}_valid_{valid_loss:.6f}_ckpt.pth')
+        torch.save(model, f'{ckpt_dir}/epoch_{epoch}_valid_{valid_loss:.6f}_model.pth')
+        logging.info(f'save model to {ckpt_dir}/epoch_{epoch}_valid_{valid_loss:.6f}_ckpt.pth')
+
+        idx = np.random.randint(bs)
+        fig, ax = plot_compare(targets[idx, :].detach().cpu().numpy()*70,
+                               np.clip(enc_preds[idx, :].detach().cpu().numpy(), 0, 1)*70,
+                               epoch=epoch, save_path=pred_dir)
+
+    logging.info('training completed successfully.')
+
","Python"