diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..07f0db3339ad9053dc95b284c4ae14e014efff89 --- /dev/null +++ b/.gitattributes @@ -0,0 +1,16 @@ +*.bin.* filter=lfs diff=lfs merge=lfs -text +*.lfs.* filter=lfs diff=lfs merge=lfs -text +*.bin filter=lfs diff=lfs merge=lfs -text +*.h5 filter=lfs diff=lfs merge=lfs -text +*.tflite filter=lfs diff=lfs merge=lfs -text +*.tar.gz filter=lfs diff=lfs merge=lfs -text +*.ot filter=lfs diff=lfs merge=lfs -text +*.onnx filter=lfs diff=lfs merge=lfs -text +*.arrow filter=lfs diff=lfs merge=lfs -text +*.ftz filter=lfs diff=lfs merge=lfs -text +*.joblib filter=lfs diff=lfs merge=lfs -text +*.model filter=lfs diff=lfs merge=lfs -text +*.msgpack filter=lfs diff=lfs merge=lfs -text +*.pb filter=lfs diff=lfs merge=lfs -text +*.pt filter=lfs diff=lfs merge=lfs -text +*.pth filter=lfs diff=lfs merge=lfs -text diff --git a/.github/workflows/check_size.yml b/.github/workflows/check_size.yml new file mode 100644 index 0000000000000000000000000000000000000000..3748d5acda16268a5a7c2159dc462bc4344570ed --- /dev/null +++ b/.github/workflows/check_size.yml @@ -0,0 +1,17 @@ +name: Check file size + +on: + pull_request: + branches: [main] + + # to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + sync-to-hub: + runs-on: ubuntu-latest + steps: + - name: Check large files + uses: ActionsDesk/lfs-warning@v2.0 + with: + filesizelimit: 10485760 # = 10MB, so we can sync to HF spaces diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml new file mode 100644 index 0000000000000000000000000000000000000000..78e16fa36d954d695ef1673c6d0df67d20640e33 --- /dev/null +++ b/.github/workflows/style.yml @@ -0,0 +1,20 @@ +name: Lint + +on: + push: + branches: [main] + pull_request: + branches: [main] + +jobs: + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: psf/black@stable + - uses: actions/setup-python@v2 + with: + 
python-version: 3.9 + - name: Install requirements + run: pip install ".[dev]" + - uses: jamescurtin/isort-action@master diff --git a/.github/workflows/sync_to_hub.yml b/.github/workflows/sync_to_hub.yml new file mode 100644 index 0000000000000000000000000000000000000000..4d64e472d72a1f4b9c1e979bf97ec0ecd586befb --- /dev/null +++ b/.github/workflows/sync_to_hub.yml @@ -0,0 +1,20 @@ +name: Sync to Hugging Face hub + +on: + push: + branches: [main] + + # to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + sync-to-hub: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Push to hub + env: + HF_TOKEN: ${{ secrets.HF_TOKEN }} + run: git push https://boris:$HF_TOKEN@huggingface.co/spaces/flax-community/dalle-mini main diff --git a/.github/workflows/sync_to_hub_debug.yml b/.github/workflows/sync_to_hub_debug.yml new file mode 100644 index 0000000000000000000000000000000000000000..07294c33e2ebbbbf9f4ab81c3bbef0f0547b82d9 --- /dev/null +++ b/.github/workflows/sync_to_hub_debug.yml @@ -0,0 +1,17 @@ +name: Deploy to debug app + +on: + # to run this workflow manually from the Actions tab + workflow_dispatch: + +jobs: + sync-to-hub-debug: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Push to hub + env: + HF_TOKEN: ${{ secrets.HF_TOKEN }} + run: git push --force https://boris:$HF_TOKEN@huggingface.co/spaces/flax-community/dalle-mini-debug +HEAD:main diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..fb1edc979b073e52ef4ed28354a731881a994d03 --- /dev/null +++ b/.gitignore @@ -0,0 +1,6 @@ +__pycache__ +.ipynb_checkpoints +.streamlit +wandb/ +*.egg-info/ +jax_cache/ diff --git a/CITATION.cff b/CITATION.cff new file mode 100644 index 0000000000000000000000000000000000000000..08b92b415912a670705af0dcbebe329f2b6815a5 --- /dev/null +++ b/CITATION.cff @@ -0,0 +1,44 @@ +# YAML 1.2 +--- +abstract: "DALL·E 
mini is a JAX/Flax reimplementation of OpenAI's DALL·E that requires much smaller hardware resources. By simplifying the architecture and model memory requirements, as well as leveraging open-source code and pre-trained models, we were able to create a model that is 27 times smaller than the original DALL·E and train it on a single TPU v3-8 for only 3 days. DALL·E mini achieves impressive results, albeit of a lower quality than the original system. It can be used for exploration and further experimentation on commodity hardware." +authors: + - + family-names: Dayma + given-names: Boris + - + family-names: Patil + given-names: Suraj + - + family-names: Cuenca + given-names: Pedro + - + family-names: Saifullah + given-names: Khalid + - + family-names: Abraham + given-names: Tanishq + - + family-names: "Lê Khắc" + given-names: "Phúc" + - + family-names: Melas + given-names: Luke + - + family-names: Ghosh + given-names: Ritobrata +cff-version: "1.1.0" +date-released: 2021-07-29 +identifiers: +keywords: + - dalle + - "text-to-image generation" + - transformer + - "zero-shot" + - JAX +license: "Apache-2.0" +doi: 10.5281/zenodo.5146400 +message: "If you use this project, please cite it using these metadata." +repository-code: "https://github.com/borisdayma/dalle-mini" +title: "DALL·E Mini" +version: "v0.1-alpha" +... \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..fc7419b7a95e60c5b7c446fee264e8904eff67c8 --- /dev/null +++ b/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2021 The DALL·E mini Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..e418a64d4986ed7fc6401781b9b2743fcc7d85c6 --- /dev/null +++ b/Makefile @@ -0,0 +1,5 @@ +.PHONY: style + +style: + black . + isort . 
\ No newline at end of file diff --git a/README.md b/README.md index 11f784bbb29b3700509906fe8f610709f2ee584b..1759f1317cee8502e6727c061c0fb2e4a9ba7f9b 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,270 @@ --- title: DALL·E mini -metaTitle: "DALL·E mini by craiyon.com on Hugging Face" emoji: 🥑 colorFrom: yellow colorTo: green -sdk: static +sdk: streamlit +app_file: app/streamlit/app.py pinned: True -license: apache-2.0 --- + +# DALL·E Mini + +[![Join us on Discord](https://img.shields.io/discord/823813159592001537?color=5865F2&logo=discord&logoColor=white)](https://discord.gg/xBPBXfcFHd) + +_Generate images from a text prompt_ + + + +Our logo was generated with DALL·E mini using the prompt "logo of an armchair in the shape of an avocado". + +You can create your own pictures with [the demo](https://huggingface.co/spaces/flax-community/dalle-mini). + +## How does it work? + +Refer to [our report](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA). + +## Inference Pipeline + +To generate sample predictions and understand the inference pipeline step by step, refer to [`tools/inference/inference_pipeline.ipynb`](tools/inference/inference_pipeline.ipynb). + +[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/borisdayma/dalle-mini/blob/main/tools/inference/inference_pipeline.ipynb) + +## Contributing + +Join the community on the [DALLE-Pytorch Discord](https://discord.gg/xBPBXfcFHd). +Any contribution is welcome, from reporting issues to proposing fixes/improvements or testing the model with cool prompts! + +## Development + +### Dependencies Installation + +For inference only, use `pip install git+https://github.com/borisdayma/dalle-mini.git`. + +For development, clone the repo and use `pip install -e ".[dev]"`. +Before making a PR, check style with `make style`. 
+ +### Image Encoder + +We use a VQGAN from [taming-transformers](https://github.com/CompVis/taming-transformers), which can also be fine-tuned. + +Use [patil-suraj/vqgan-jax](https://github.com/patil-suraj/vqgan-jax) if you want to convert a checkpoint to JAX (does not support Gumbel). + +Any image encoder that turns an image into a fixed sequence of tokens can be used. + +### Training of DALL·E mini + +Use [`tools/train/train.py`](tools/train/train.py). + +You can also adjust the [sweep configuration file](https://docs.wandb.ai/guides/sweeps) if you need to perform a hyperparameter search. + +## FAQ + +### Where to find the latest models? + +Trained models are on 🤗 Model Hub: + +- [VQGAN-f16-16384](https://huggingface.co/dalle-mini/vqgan_imagenet_f16_16384) for encoding/decoding images +- [DALL·E mini](https://huggingface.co/flax-community/dalle-mini) for generating images from a text prompt + +### Where does the logo come from? + +The "armchair in the shape of an avocado" was used by OpenAI when releasing DALL·E to illustrate the model's capabilities. Having successful predictions on this prompt represents a big milestone to us. 
+ +## Acknowledgements + +- 🤗 Hugging Face for organizing [the FLAX/JAX community week](https://github.com/huggingface/transformers/tree/master/examples/research_projects/jax-projects) +- Google [TPU Research Cloud (TRC) program](https://sites.research.google/trc/) for providing computing resources +- [Weights & Biases](https://wandb.com/) for providing the infrastructure for experiment tracking and model management + +## Authors & Contributors + +DALL·E mini was initially developed by: + +- [Boris Dayma](https://github.com/borisdayma) +- [Suraj Patil](https://github.com/patil-suraj) +- [Pedro Cuenca](https://github.com/pcuenca) +- [Khalid Saifullah](https://github.com/khalidsaifullaah) +- [Tanishq Abraham](https://github.com/tmabraham) +- [Phúc Lê Khắc](https://github.com/lkhphuc) +- [Luke Melas](https://github.com/lukemelas) +- [Ritobrata Ghosh](https://github.com/ghosh-r) + +Many thanks to the people who helped make it better: + +- the [DALLE-Pytorch](https://discord.gg/xBPBXfcFHd) and [EleutherAI](https://www.eleuther.ai/) communities for testing and exchanging cool ideas +- [Rohan Anil](https://github.com/rohan-anil) for adding Distributed Shampoo optimizer +- [Phil Wang](https://github.com/lucidrains) has provided a lot of cool implementations of transformer variants and gives interesting insights with [x-transformers](https://github.com/lucidrains/x-transformers) +- [Katherine Crowson](https://github.com/crowsonkb) for [super conditioning](https://twitter.com/RiversHaveWings/status/1478093658716966912) + +## Citing DALL·E mini + +If you find DALL·E mini useful in your research or wish to refer, please use the following BibTeX entry. 
+ +```text +@misc{Dayma_DALL·E_Mini_2021, + author = {Dayma, Boris and Patil, Suraj and Cuenca, Pedro and Saifullah, Khalid and Abraham, Tanishq and Lê Khắc, Phúc and Melas, Luke and Ghosh, Ritobrata}, + doi = {10.5281/zenodo.5146400}, + month = {7}, + title = {DALL·E Mini}, + url = {https://github.com/borisdayma/dalle-mini}, + year = {2021} +} +``` + +## References + +Original DALL·E from "[Zero-Shot Text-to-Image Generation](https://arxiv.org/abs/2102.12092)" with image quantization from "[Learning Transferable Visual Models From Natural Language Supervision](https://arxiv.org/abs/2103.00020)". + +Image encoder from "[Taming Transformers for High-Resolution Image Synthesis](https://arxiv.org/abs/2012.09841v2)". + +Sequence to sequence model based on "[BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension](https://arxiv.org/abs/1910.13461v1)" with implementation of a few variants: + +- "[GLU Variants Improve Transformer](https://arxiv.org/abs/2002.05202)" +- "[Deepnet: Scaling Transformers to 1,000 Layers](https://arxiv.org/abs/2203.00555)" +- "[NormFormer: Improved Transformer Pretraining with Extra Normalization](https://arxiv.org/abs/2110.09456)" +- "[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)" +- "[CogView: Mastering Text-to-Image Generation via Transformers](https://arxiv.org/abs/2105.13290v2)" +- "[Root Mean Square Layer Normalization](https://arxiv.org/abs/1910.07467)" +- "[Sinkformers: Transformers with Doubly Stochastic Attention](https://arxiv.org/abs/2110.11773)" + +Main optimizer (Distributed Shampoo) from "[Scalable Second Order Optimization for Deep Learning](https://arxiv.org/abs/2002.09018)". 
+ +### Citations + +```text +@misc{ + title={Zero-Shot Text-to-Image Generation}, + author={Aditya Ramesh and Mikhail Pavlov and Gabriel Goh and Scott Gray and Chelsea Voss and Alec Radford and Mark Chen and Ilya Sutskever}, + year={2021}, + eprint={2102.12092}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +```text +@misc{ + title={Learning Transferable Visual Models From Natural Language Supervision}, + author={Alec Radford and Jong Wook Kim and Chris Hallacy and Aditya Ramesh and Gabriel Goh and Sandhini Agarwal and Girish Sastry and Amanda Askell and Pamela Mishkin and Jack Clark and Gretchen Krueger and Ilya Sutskever}, + year={2021}, + eprint={2103.00020}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +```text +@misc{ + title={Taming Transformers for High-Resolution Image Synthesis}, + author={Patrick Esser and Robin Rombach and Björn Ommer}, + year={2021}, + eprint={2012.09841}, + archivePrefix={arXiv}, + primaryClass={cs.CV} +} +``` + +```text +@misc{ + title={BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension}, + author={Mike Lewis and Yinhan Liu and Naman Goyal and Marjan Ghazvininejad and Abdelrahman Mohamed and Omer Levy and Ves Stoyanov and Luke Zettlemoyer}, + year={2019}, + eprint={1910.13461}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +```text +@misc{ + title={Scalable Second Order Optimization for Deep Learning}, + author={Rohan Anil and Vineet Gupta and Tomer Koren and Kevin Regan and Yoram Singer}, + year={2021}, + eprint={2002.09018}, + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +```text +@misc{ + title={GLU Variants Improve Transformer}, + author={Noam Shazeer}, + year={2020}, + url={https://arxiv.org/abs/2002.05202} +} +``` + +```text + @misc{ + title={DeepNet: Scaling transformers to 1,000 layers}, + author={Wang, Hongyu and Ma, Shuming and Dong, Li and Huang, Shaohan and Zhang, Dongdong and Wei, Furu}, + year={2022}, + 
eprint={2203.00555} + archivePrefix={arXiv}, + primaryClass={cs.LG} +} +``` + +```text +@misc{ + title={NormFormer: Improved Transformer Pretraining with Extra Normalization}, + author={Sam Shleifer and Jason Weston and Myle Ott}, + year={2021}, + eprint={2110.09456}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +```text +@inproceedings{ + title={Swin Transformer V2: Scaling Up Capacity and Resolution}, + author={Ze Liu and Han Hu and Yutong Lin and Zhuliang Yao and Zhenda Xie and Yixuan Wei and Jia Ning and Yue Cao and Zheng Zhang and Li Dong and Furu Wei and Baining Guo}, + booktitle={International Conference on Computer Vision and Pattern Recognition (CVPR)}, + year={2022} +} +``` + +```text +@misc{ + title = {CogView: Mastering Text-to-Image Generation via Transformers}, + author = {Ming Ding and Zhuoyi Yang and Wenyi Hong and Wendi Zheng and Chang Zhou and Da Yin and Junyang Lin and Xu Zou and Zhou Shao and Hongxia Yang and Jie Tang}, + year = {2021}, + eprint = {2105.13290}, + archivePrefix = {arXiv}, + primaryClass = {cs.CV} +} +``` + +```text +@misc{ + title = {Root Mean Square Layer Normalization}, + author = {Biao Zhang and Rico Sennrich}, + year = {2019}, + eprint = {1910.07467}, + archivePrefix = {arXiv}, + primaryClass = {cs.LG} +} +``` + +```text +@misc{ + title = {Sinkformers: Transformers with Doubly Stochastic Attention}, + url = {https://arxiv.org/abs/2110.11773}, + author = {Sander, Michael E. and Ablin, Pierre and Blondel, Mathieu and Peyré, Gabriel}, + publisher = {arXiv}, + year = {2021}, +} +``` + +```text +@misc{ + title = {Smooth activations and reproducibility in deep networks}, + url = {https://arxiv.org/abs/2010.09931}, + author = {Shamir, Gil I. 
and Lin, Dong and Coviello, Lorenzo}, + publisher = {arXiv}, + year = {2020}, +} +``` diff --git a/app/gradio/app_gradio.py b/app/gradio/app_gradio.py new file mode 100644 index 0000000000000000000000000000000000000000..40013735519c4f0bab10dce4a6466af236454151 --- /dev/null +++ b/app/gradio/app_gradio.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Uncomment to run on cpu +# import os +# os.environ["JAX_PLATFORM_NAME"] = "cpu" + +import random + +import gradio as gr +import jax +import numpy as np +from flax.jax_utils import replicate +from flax.training.common_utils import shard +from PIL import Image, ImageDraw, ImageFont + +# ## CLIP Scoring +from transformers import BartTokenizer, CLIPProcessor, FlaxCLIPModel +from vqgan_jax.modeling_flax_vqgan import VQModel + +from dalle_mini.model import CustomFlaxBartForConditionalGeneration + +DALLE_REPO = "flax-community/dalle-mini" +DALLE_COMMIT_ID = "4d34126d0df8bc4a692ae933e3b902a1fa8b6114" + +VQGAN_REPO = "flax-community/vqgan_f16_16384" +VQGAN_COMMIT_ID = "90cc46addd2dd8f5be21586a9a23e1b95aa506a9" + +tokenizer = BartTokenizer.from_pretrained(DALLE_REPO, revision=DALLE_COMMIT_ID) +model = CustomFlaxBartForConditionalGeneration.from_pretrained( + DALLE_REPO, revision=DALLE_COMMIT_ID +) +vqgan = VQModel.from_pretrained(VQGAN_REPO, revision=VQGAN_COMMIT_ID) + + +def captioned_strip(images, caption=None, rows=1): + increased_h = 0 if caption is None else 48 + w, h = images[0].size[0], images[0].size[1] + img = Image.new("RGB", (len(images) * w // rows, h * rows + increased_h)) + for i, img_ in enumerate(images): + img.paste(img_, (i // rows * w, increased_h + (i % rows) * h)) + + if caption is not None: + draw = ImageDraw.Draw(img) + font = ImageFont.truetype( + "/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40 + ) + draw.text((20, 3), caption, (255, 255, 255), font=font) + return img + + +def custom_to_pil(x): + x = np.clip(x, 0.0, 1.0) + x = (255 * x).astype(np.uint8) + x = 
Image.fromarray(x) + if not x.mode == "RGB": + x = x.convert("RGB") + return x + + +def generate(input, rng, params): + return model.generate( + **input, + max_length=257, + num_beams=1, + do_sample=True, + prng_key=rng, + eos_token_id=50000, + pad_token_id=50000, + params=params, + ) + + +def get_images(indices, params): + return vqgan.decode_code(indices, params=params) + + +p_generate = jax.pmap(generate, "batch") +p_get_images = jax.pmap(get_images, "batch") + +bart_params = replicate(model.params) +vqgan_params = replicate(vqgan.params) + +clip = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32") +print("Initialize FlaxCLIPModel") +processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32") +print("Initialize CLIPProcessor") + + +def hallucinate(prompt, num_images=64): + prompt = [prompt] * jax.device_count() + inputs = tokenizer( + prompt, + return_tensors="jax", + padding="max_length", + truncation=True, + max_length=128, + ).data + inputs = shard(inputs) + + all_images = [] + for i in range(num_images // jax.device_count()): + key = random.randint(0, 1e7) + rng = jax.random.PRNGKey(key) + rngs = jax.random.split(rng, jax.local_device_count()) + indices = p_generate(inputs, rngs, bart_params).sequences + indices = indices[:, :, 1:] + + images = p_get_images(indices, vqgan_params) + images = np.squeeze(np.asarray(images), 1) + for image in images: + all_images.append(custom_to_pil(image)) + return all_images + + +def clip_top_k(prompt, images, k=8): + inputs = processor(text=prompt, images=images, return_tensors="np", padding=True) + outputs = clip(**inputs) + logits = outputs.logits_per_text + scores = np.array(logits[0]).argsort()[-k:][::-1] + return [images[score] for score in scores] + + +def compose_predictions(images, caption=None): + increased_h = 0 if caption is None else 48 + w, h = images[0].size[0], images[0].size[1] + img = Image.new("RGB", (len(images) * w, h + increased_h)) + for i, img_ in enumerate(images): + 
img.paste(img_, (i * w, increased_h)) + + if caption is not None: + draw = ImageDraw.Draw(img) + font = ImageFont.truetype( + "/usr/share/fonts/truetype/liberation2/LiberationMono-Bold.ttf", 40 + ) + draw.text((20, 3), caption, (255, 255, 255), font=font) + return img + + +def top_k_predictions(prompt, num_candidates=32, k=8): + images = hallucinate(prompt, num_images=num_candidates) + images = clip_top_k(prompt, images, k=k) + return images + + +def run_inference(prompt, num_images=32, num_preds=8): + images = top_k_predictions(prompt, num_candidates=num_images, k=num_preds) + predictions = captioned_strip(images) + output_title = f""" + {prompt} + """ + return (output_title, predictions) + + +outputs = [ + gr.outputs.HTML(label=""), # To be used as title + gr.outputs.Image(label=""), +] + +description = """ +DALL·E-mini is an AI model that generates images from any prompt you give! Generate images from text: +""" +gr.Interface( + run_inference, + inputs=[gr.inputs.Textbox(label="What do you want to see?")], + outputs=outputs, + title="DALL·E mini", + description=description, + article="

Created by Boris Dayma et al. 2021 | GitHub | Report

", + layout="vertical", + theme="huggingface", + examples=[ + ["an armchair in the shape of an avocado"], + ["snowy mountains by the sea"], + ], + allow_flagging=False, + live=False, + # server_port=8999 +).launch(share=True) diff --git a/app/gradio/requirements.txt b/app/gradio/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..45d170f9c0eca58b3776143d8cf5375566920916 --- /dev/null +++ b/app/gradio/requirements.txt @@ -0,0 +1,4 @@ +# Requirements for huggingface spaces +gradio>=2.2.3 +flax +transformers diff --git a/app/streamlit/app.py b/app/streamlit/app.py new file mode 100644 index 0000000000000000000000000000000000000000..101199533c9c1a4669ef89b9fdbb47b2a2865787 --- /dev/null +++ b/app/streamlit/app.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# coding: utf-8 + +import streamlit as st + +st.sidebar.markdown( + """ + +

+ +

+""", + unsafe_allow_html=True, +) +st.sidebar.markdown( + """ +___ +

+DALL·E mini is an AI model that generates images from any prompt you give! +

+ +

+Created by Boris Dayma et al. 2021 +
+GitHub | Project Report +

+ """, + unsafe_allow_html=True, +) + +st.header("DALL·E mini") +st.subheader("Generate images from text") + +container = st.empty() +container.markdown( + f""" + A new demo with a better model is now available [in this space](https://huggingface.co/spaces/dalle-mini/dalle-mini)! Check it out! + + For more information about the project, please visit: + * [Our GitHub repository](https://github.com/borisdayma/dalle-mini). + * [The project report](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA) we wrote during the initial JAX-Flax sprint organized by 🤗 Hugging Face. + + Stay tuned for larger and better models, and more technical details! +""" +) diff --git a/app/streamlit/img/loading.gif b/app/streamlit/img/loading.gif new file mode 100644 index 0000000000000000000000000000000000000000..497e42cdbcbc9e902de0c7f67358a23a4fc75cbb Binary files /dev/null and b/app/streamlit/img/loading.gif differ diff --git a/html2canvas.js b/html2canvas.js deleted file mode 100644 index 96e2dc5707b1a584ff7b3b583aea7c6c18d4ea76..0000000000000000000000000000000000000000 --- a/html2canvas.js +++ /dev/null @@ -1,7756 +0,0 @@ -/*! - * html2canvas 1.4.1 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ -(function (global, factory) { - typeof exports === 'object' && typeof module !== 'undefined' ? module.exports = factory() : - typeof define === 'function' && define.amd ? define(factory) : - (global = typeof globalThis !== 'undefined' ? globalThis : global || self, global.html2canvas = factory()); -}(this, (function () { 'use strict'; - - /*! ***************************************************************************** - Copyright (c) Microsoft Corporation. - - Permission to use, copy, modify, and/or distribute this software for any - purpose with or without fee is hereby granted. 
- - THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY - AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - PERFORMANCE OF THIS SOFTWARE. - ***************************************************************************** */ - /* global Reflect, Promise */ - - var extendStatics = function(d, b) { - extendStatics = Object.setPrototypeOf || - ({ __proto__: [] } instanceof Array && function (d, b) { d.__proto__ = b; }) || - function (d, b) { for (var p in b) if (Object.prototype.hasOwnProperty.call(b, p)) d[p] = b[p]; }; - return extendStatics(d, b); - }; - - function __extends(d, b) { - if (typeof b !== "function" && b !== null) - throw new TypeError("Class extends value " + String(b) + " is not a constructor or null"); - extendStatics(d, b); - function __() { this.constructor = d; } - d.prototype = b === null ? Object.create(b) : (__.prototype = b.prototype, new __()); - } - - var __assign = function() { - __assign = Object.assign || function __assign(t) { - for (var s, i = 1, n = arguments.length; i < n; i++) { - s = arguments[i]; - for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; - } - return t; - }; - return __assign.apply(this, arguments); - }; - - function __awaiter(thisArg, _arguments, P, generator) { - function adopt(value) { return value instanceof P ? 
value : new P(function (resolve) { resolve(value); }); } - return new (P || (P = Promise))(function (resolve, reject) { - function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } } - function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } } - function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); } - step((generator = generator.apply(thisArg, _arguments || [])).next()); - }); - } - - function __generator(thisArg, body) { - var _ = { label: 0, sent: function() { if (t[0] & 1) throw t[1]; return t[1]; }, trys: [], ops: [] }, f, y, t, g; - return g = { next: verb(0), "throw": verb(1), "return": verb(2) }, typeof Symbol === "function" && (g[Symbol.iterator] = function() { return this; }), g; - function verb(n) { return function (v) { return step([n, v]); }; } - function step(op) { - if (f) throw new TypeError("Generator is already executing."); - while (_) try { - if (f = 1, y && (t = op[0] & 2 ? y["return"] : op[0] ? y["throw"] || ((t = y["return"]) && t.call(y), 0) : y.next) && !(t = t.call(y, op[1])).done) return t; - if (y = 0, t) op = [op[0] & 2, t.value]; - switch (op[0]) { - case 0: case 1: t = op; break; - case 4: _.label++; return { value: op[1], done: false }; - case 5: _.label++; y = op[1]; op = [0]; continue; - case 7: op = _.ops.pop(); _.trys.pop(); continue; - default: - if (!(t = _.trys, t = t.length > 0 && t[t.length - 1]) && (op[0] === 6 || op[0] === 2)) { _ = 0; continue; } - if (op[0] === 3 && (!t || (op[1] > t[0] && op[1] < t[3]))) { _.label = op[1]; break; } - if (op[0] === 6 && _.label < t[1]) { _.label = t[1]; t = op; break; } - if (t && _.label < t[2]) { _.label = t[2]; _.ops.push(op); break; } - if (t[2]) _.ops.pop(); - _.trys.pop(); continue; - } - op = body.call(thisArg, _); - } catch (e) { op = [6, e]; y = 0; } finally { f = t = 0; } - if (op[0] & 5) throw op[1]; return { value: op[0] ? 
op[1] : void 0, done: true }; - } - } - - function __spreadArray(to, from, pack) { - if (pack || arguments.length === 2) for (var i = 0, l = from.length, ar; i < l; i++) { - if (ar || !(i in from)) { - if (!ar) ar = Array.prototype.slice.call(from, 0, i); - ar[i] = from[i]; - } - } - return to.concat(ar || from); - } - - var Bounds = /** @class */ (function () { - function Bounds(left, top, width, height) { - this.left = left; - this.top = top; - this.width = width; - this.height = height; - } - Bounds.prototype.add = function (x, y, w, h) { - return new Bounds(this.left + x, this.top + y, this.width + w, this.height + h); - }; - Bounds.fromClientRect = function (context, clientRect) { - return new Bounds(clientRect.left + context.windowBounds.left, clientRect.top + context.windowBounds.top, clientRect.width, clientRect.height); - }; - Bounds.fromDOMRectList = function (context, domRectList) { - var domRect = Array.from(domRectList).find(function (rect) { return rect.width !== 0; }); - return domRect - ? 
new Bounds(domRect.left + context.windowBounds.left, domRect.top + context.windowBounds.top, domRect.width, domRect.height) - : Bounds.EMPTY; - }; - Bounds.EMPTY = new Bounds(0, 0, 0, 0); - return Bounds; - }()); - var parseBounds = function (context, node) { - return Bounds.fromClientRect(context, node.getBoundingClientRect()); - }; - var parseDocumentSize = function (document) { - var body = document.body; - var documentElement = document.documentElement; - if (!body || !documentElement) { - throw new Error("Unable to get document size"); - } - var width = Math.max(Math.max(body.scrollWidth, documentElement.scrollWidth), Math.max(body.offsetWidth, documentElement.offsetWidth), Math.max(body.clientWidth, documentElement.clientWidth)); - var height = Math.max(Math.max(body.scrollHeight, documentElement.scrollHeight), Math.max(body.offsetHeight, documentElement.offsetHeight), Math.max(body.clientHeight, documentElement.clientHeight)); - return new Bounds(0, 0, width, height); - }; - - /* - * css-line-break 2.1.0 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var toCodePoints$1 = function (str) { - var codePoints = []; - var i = 0; - var length = str.length; - while (i < length) { - var value = str.charCodeAt(i++); - if (value >= 0xd800 && value <= 0xdbff && i < length) { - var extra = str.charCodeAt(i++); - if ((extra & 0xfc00) === 0xdc00) { - codePoints.push(((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000); - } - else { - codePoints.push(value); - i--; - } - } - else { - codePoints.push(value); - } - } - return codePoints; - }; - var fromCodePoint$1 = function () { - var codePoints = []; - for (var _i = 0; _i < arguments.length; _i++) { - codePoints[_i] = arguments[_i]; - } - if (String.fromCodePoint) { - return String.fromCodePoint.apply(String, codePoints); - } - var length = codePoints.length; - if (!length) { - return ''; - } - var codeUnits = []; - var index = -1; - var result = ''; - while (++index < length) { - var 
codePoint = codePoints[index]; - if (codePoint <= 0xffff) { - codeUnits.push(codePoint); - } - else { - codePoint -= 0x10000; - codeUnits.push((codePoint >> 10) + 0xd800, (codePoint % 0x400) + 0xdc00); - } - if (index + 1 === length || codeUnits.length > 0x4000) { - result += String.fromCharCode.apply(String, codeUnits); - codeUnits.length = 0; - } - } - return result; - }; - var chars$2 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup$2 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256); - for (var i$2 = 0; i$2 < chars$2.length; i$2++) { - lookup$2[chars$2.charCodeAt(i$2)] = i$2; - } - - /* - * utrie 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars$1$1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup$1$1 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256); - for (var i$1$1 = 0; i$1$1 < chars$1$1.length; i$1$1++) { - lookup$1$1[chars$1$1.charCodeAt(i$1$1)] = i$1$1; - } - var decode$1 = function (base64) { - var bufferLength = base64.length * 0.75, len = base64.length, i, p = 0, encoded1, encoded2, encoded3, encoded4; - if (base64[base64.length - 1] === '=') { - bufferLength--; - if (base64[base64.length - 2] === '=') { - bufferLength--; - } - } - var buffer = typeof ArrayBuffer !== 'undefined' && - typeof Uint8Array !== 'undefined' && - typeof Uint8Array.prototype.slice !== 'undefined' - ? new ArrayBuffer(bufferLength) - : new Array(bufferLength); - var bytes = Array.isArray(buffer) ? 
buffer : new Uint8Array(buffer); - for (i = 0; i < len; i += 4) { - encoded1 = lookup$1$1[base64.charCodeAt(i)]; - encoded2 = lookup$1$1[base64.charCodeAt(i + 1)]; - encoded3 = lookup$1$1[base64.charCodeAt(i + 2)]; - encoded4 = lookup$1$1[base64.charCodeAt(i + 3)]; - bytes[p++] = (encoded1 << 2) | (encoded2 >> 4); - bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2); - bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63); - } - return buffer; - }; - var polyUint16Array$1 = function (buffer) { - var length = buffer.length; - var bytes = []; - for (var i = 0; i < length; i += 2) { - bytes.push((buffer[i + 1] << 8) | buffer[i]); - } - return bytes; - }; - var polyUint32Array$1 = function (buffer) { - var length = buffer.length; - var bytes = []; - for (var i = 0; i < length; i += 4) { - bytes.push((buffer[i + 3] << 24) | (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | buffer[i]); - } - return bytes; - }; - - /** Shift size for getting the index-2 table offset. */ - var UTRIE2_SHIFT_2$1 = 5; - /** Shift size for getting the index-1 table offset. */ - var UTRIE2_SHIFT_1$1 = 6 + 5; - /** - * Shift size for shifting left the index array values. - * Increases possible data size with 16-bit index values at the cost - * of compactability. - * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY. - */ - var UTRIE2_INDEX_SHIFT$1 = 2; - /** - * Difference between the two shift sizes, - * for getting an index-1 offset from an index-2 offset. 6=11-5 - */ - var UTRIE2_SHIFT_1_2$1 = UTRIE2_SHIFT_1$1 - UTRIE2_SHIFT_2$1; - /** - * The part of the index-2 table for U+D800..U+DBFF stores values for - * lead surrogate code _units_ not code _points_. - * Values for lead surrogate code _points_ are indexed with this portion of the table. - * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.) - */ - var UTRIE2_LSCP_INDEX_2_OFFSET$1 = 0x10000 >> UTRIE2_SHIFT_2$1; - /** Number of entries in a data block. 
32=0x20 */ - var UTRIE2_DATA_BLOCK_LENGTH$1 = 1 << UTRIE2_SHIFT_2$1; - /** Mask for getting the lower bits for the in-data-block offset. */ - var UTRIE2_DATA_MASK$1 = UTRIE2_DATA_BLOCK_LENGTH$1 - 1; - var UTRIE2_LSCP_INDEX_2_LENGTH$1 = 0x400 >> UTRIE2_SHIFT_2$1; - /** Count the lengths of both BMP pieces. 2080=0x820 */ - var UTRIE2_INDEX_2_BMP_LENGTH$1 = UTRIE2_LSCP_INDEX_2_OFFSET$1 + UTRIE2_LSCP_INDEX_2_LENGTH$1; - /** - * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820. - * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2. - */ - var UTRIE2_UTF8_2B_INDEX_2_OFFSET$1 = UTRIE2_INDEX_2_BMP_LENGTH$1; - var UTRIE2_UTF8_2B_INDEX_2_LENGTH$1 = 0x800 >> 6; /* U+0800 is the first code point after 2-byte UTF-8 */ - /** - * The index-1 table, only used for supplementary code points, at offset 2112=0x840. - * Variable length, for code points up to highStart, where the last single-value range starts. - * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1. - * (For 0x100000 supplementary code points U+10000..U+10ffff.) - * - * The part of the index-2 table for supplementary code points starts - * after this index-1 table. - * - * Both the index-1 table and the following part of the index-2 table - * are omitted completely if there is only BMP data. - */ - var UTRIE2_INDEX_1_OFFSET$1 = UTRIE2_UTF8_2B_INDEX_2_OFFSET$1 + UTRIE2_UTF8_2B_INDEX_2_LENGTH$1; - /** - * Number of index-1 entries for the BMP. 32=0x20 - * This part of the index-1 table is omitted from the serialized form. - */ - var UTRIE2_OMITTED_BMP_INDEX_1_LENGTH$1 = 0x10000 >> UTRIE2_SHIFT_1$1; - /** Number of entries in an index-2 block. 64=0x40 */ - var UTRIE2_INDEX_2_BLOCK_LENGTH$1 = 1 << UTRIE2_SHIFT_1_2$1; - /** Mask for getting the lower bits for the in-index-2-block offset. 
*/ - var UTRIE2_INDEX_2_MASK$1 = UTRIE2_INDEX_2_BLOCK_LENGTH$1 - 1; - var slice16$1 = function (view, start, end) { - if (view.slice) { - return view.slice(start, end); - } - return new Uint16Array(Array.prototype.slice.call(view, start, end)); - }; - var slice32$1 = function (view, start, end) { - if (view.slice) { - return view.slice(start, end); - } - return new Uint32Array(Array.prototype.slice.call(view, start, end)); - }; - var createTrieFromBase64$1 = function (base64, _byteLength) { - var buffer = decode$1(base64); - var view32 = Array.isArray(buffer) ? polyUint32Array$1(buffer) : new Uint32Array(buffer); - var view16 = Array.isArray(buffer) ? polyUint16Array$1(buffer) : new Uint16Array(buffer); - var headerLength = 24; - var index = slice16$1(view16, headerLength / 2, view32[4] / 2); - var data = view32[5] === 2 - ? slice16$1(view16, (headerLength + view32[4]) / 2) - : slice32$1(view32, Math.ceil((headerLength + view32[4]) / 4)); - return new Trie$1(view32[0], view32[1], view32[2], view32[3], index, data); - }; - var Trie$1 = /** @class */ (function () { - function Trie(initialValue, errorValue, highStart, highValueIndex, index, data) { - this.initialValue = initialValue; - this.errorValue = errorValue; - this.highStart = highStart; - this.highValueIndex = highValueIndex; - this.index = index; - this.data = data; - } - /** - * Get the value for a code point as stored in the Trie. - * - * @param codePoint the code point - * @return the value - */ - Trie.prototype.get = function (codePoint) { - var ix; - if (codePoint >= 0) { - if (codePoint < 0x0d800 || (codePoint > 0x0dbff && codePoint <= 0x0ffff)) { - // Ordinary BMP code point, excluding leading surrogates. - // BMP uses a single level lookup. BMP index starts at offset 0 in the Trie2 index. - // 16 bit data is stored in the index array itself. 
- ix = this.index[codePoint >> UTRIE2_SHIFT_2$1]; - ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1); - return this.data[ix]; - } - if (codePoint <= 0xffff) { - // Lead Surrogate Code Point. A Separate index section is stored for - // lead surrogate code units and code points. - // The main index has the code unit data. - // For this function, we need the code point data. - // Note: this expression could be refactored for slightly improved efficiency, but - // surrogate code points will be so rare in practice that it's not worth it. - ix = this.index[UTRIE2_LSCP_INDEX_2_OFFSET$1 + ((codePoint - 0xd800) >> UTRIE2_SHIFT_2$1)]; - ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1); - return this.data[ix]; - } - if (codePoint < this.highStart) { - // Supplemental code point, use two-level lookup. - ix = UTRIE2_INDEX_1_OFFSET$1 - UTRIE2_OMITTED_BMP_INDEX_1_LENGTH$1 + (codePoint >> UTRIE2_SHIFT_1$1); - ix = this.index[ix]; - ix += (codePoint >> UTRIE2_SHIFT_2$1) & UTRIE2_INDEX_2_MASK$1; - ix = this.index[ix]; - ix = (ix << UTRIE2_INDEX_SHIFT$1) + (codePoint & UTRIE2_DATA_MASK$1); - return this.data[ix]; - } - if (codePoint <= 0x10ffff) { - return this.data[this.highValueIndex]; - } - } - // Fall through. The code point is outside of the legal range of 0..0x10ffff. - return this.errorValue; - }; - return Trie; - }()); - - /* - * base64-arraybuffer 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars$3 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup$3 = typeof Uint8Array === 'undefined' ? 
[] : new Uint8Array(256); - for (var i$3 = 0; i$3 < chars$3.length; i$3++) { - lookup$3[chars$3.charCodeAt(i$3)] = i$3; - } - - var base64$1 = 'KwAAAAAAAAAACA4AUD0AADAgAAACAAAAAAAIABAAGABAAEgAUABYAGAAaABgAGgAYgBqAF8AZwBgAGgAcQB5AHUAfQCFAI0AlQCdAKIAqgCyALoAYABoAGAAaABgAGgAwgDKAGAAaADGAM4A0wDbAOEA6QDxAPkAAQEJAQ8BFwF1AH0AHAEkASwBNAE6AUIBQQFJAVEBWQFhAWgBcAF4ATAAgAGGAY4BlQGXAZ8BpwGvAbUBvQHFAc0B0wHbAeMB6wHxAfkBAQIJAvEBEQIZAiECKQIxAjgCQAJGAk4CVgJeAmQCbAJ0AnwCgQKJApECmQKgAqgCsAK4ArwCxAIwAMwC0wLbAjAA4wLrAvMC+AIAAwcDDwMwABcDHQMlAy0DNQN1AD0DQQNJA0kDSQNRA1EDVwNZA1kDdQB1AGEDdQBpA20DdQN1AHsDdQCBA4kDkQN1AHUAmQOhA3UAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AKYDrgN1AHUAtgO+A8YDzgPWAxcD3gPjA+sD8wN1AHUA+wMDBAkEdQANBBUEHQQlBCoEFwMyBDgEYABABBcDSARQBFgEYARoBDAAcAQzAXgEgASIBJAEdQCXBHUAnwSnBK4EtgS6BMIEyAR1AHUAdQB1AHUAdQCVANAEYABgAGAAYABgAGAAYABgANgEYADcBOQEYADsBPQE/AQEBQwFFAUcBSQFLAU0BWQEPAVEBUsFUwVbBWAAYgVgAGoFcgV6BYIFigWRBWAAmQWfBaYFYABgAGAAYABgAKoFYACxBbAFuQW6BcEFwQXHBcEFwQXPBdMF2wXjBeoF8gX6BQIGCgYSBhoGIgYqBjIGOgZgAD4GRgZMBmAAUwZaBmAAYABgAGAAYABgAGAAYABgAGAAYABgAGIGYABpBnAGYABgAGAAYABgAGAAYABgAGAAYAB4Bn8GhQZgAGAAYAB1AHcDFQSLBmAAYABgAJMGdQA9A3UAmwajBqsGqwaVALMGuwbDBjAAywbSBtIG1QbSBtIG0gbSBtIG0gbdBuMG6wbzBvsGAwcLBxMHAwcbByMHJwcsBywHMQcsB9IGOAdAB0gHTgfSBkgHVgfSBtIG0gbSBtIG0gbSBtIG0gbSBiwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdgAGAALAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywH
LAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAdbB2MHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB2kH0gZwB64EdQB1AHUAdQB1AHUAdQB1AHUHfQdgAIUHjQd1AHUAlQedB2AAYAClB6sHYACzB7YHvgfGB3UAzgfWBzMB3gfmB1EB7gf1B/0HlQENAQUIDQh1ABUIHQglCBcDLQg1CD0IRQhNCEEDUwh1AHUAdQBbCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQI
ZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIaQhjCGQIZQhmCGcIaAhpCGMIZAhlCGYIZwhoCGkIYwhkCGUIZghnCGgIcAh3CHoIMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIgggwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAALAcsBywHLAcsBywHLAcsBywHLAcsB4oILAcsB44I0gaWCJ4Ipgh1AHUAqgiyCHUAdQB1AHUAdQB1AHUAdQB1AHUAtwh8AXUAvwh1AMUIyQjRCNkI4AjoCHUAdQB1AO4I9gj+CAYJDgkTCS0HGwkjCYIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiCCIIIggiAAIAAAAFAAYABgAGIAXwBgAHEAdQBFAJUAogCyAKAAYABgAEIA4ABGANMA4QDxAMEBDwE1AFwBLAE6AQEBUQF4QkhCmEKoQrhCgAHIQsAB0MLAAcABwAHAAeDC6ABoAHDCwMMAAcABwAHAAdDDGMMAAcAB6MM4wwjDWMNow3jDaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAGgAaABoAEjDqABWw6bDqABpg6gAaABoAHcDvwOPA+gAaABfA/8DvwO/A78DvwO
/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DvwO/A78DpcPAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcAB9cPKwkyCToJMAB1AHUAdQBCCUoJTQl1AFUJXAljCWcJawkwADAAMAAwAHMJdQB2CX4JdQCECYoJjgmWCXUAngkwAGAAYABxAHUApgn3A64JtAl1ALkJdQDACTAAMAAwADAAdQB1AHUAdQB1AHUAdQB1AHUAowYNBMUIMAAwADAAMADICcsJ0wnZCRUE4QkwAOkJ8An4CTAAMAB1AAAKvwh1AAgKDwoXCh8KdQAwACcKLgp1ADYKqAmICT4KRgowADAAdQB1AE4KMAB1AFYKdQBeCnUAZQowADAAMAAwADAAMAAwADAAMAAVBHUAbQowADAAdQC5CXUKMAAwAHwBxAijBogEMgF9CoQKiASMCpQKmgqIBKIKqgquCogEDQG2Cr4KxgrLCjAAMADTCtsKCgHjCusK8Qr5CgELMAAwADAAMAB1AIsECQsRC3UANAEZCzAAMAAwADAAMAB1ACELKQswAHUANAExCzkLdQBBC0kLMABRC1kLMAAwADAAMAAwADAAdQBhCzAAMAAwAGAAYABpC3ELdwt/CzAAMACHC4sLkwubC58Lpwt1AK4Ltgt1APsDMAAwADAAMAAwADAAMAAwAL4LwwvLC9IL1wvdCzAAMADlC+kL8Qv5C/8LSQswADAAMAAwADAAMAAwADAAMAAHDDAAMAAwADAAMAAODBYMHgx1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1ACYMMAAwADAAdQB1AHUALgx1AHUAdQB1AHUAdQA2DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AD4MdQBGDHUAdQB1AHUAdQB1AEkMdQB1AHUAdQB1AFAMMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQBYDHUAdQB1AF8M
MAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUA+wMVBGcMMAAwAHwBbwx1AHcMfwyHDI8MMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAYABgAJcMMAAwADAAdQB1AJ8MlQClDDAAMACtDCwHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsB7UMLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHdQB1AHUAdQB1AHUAdQB1AHUAdQB1AHUAdQB1AA0EMAC9DDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAsBywHLAcsBywHLAcsBywHLQcwAMEMyAwsBywHLAcsBywHLAcsBywHLAcsBywHzAwwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwAHUAdQB1ANQM2QzhDDAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMABgAGAAYABgAGAAYABgAOkMYADxDGAA+AwADQYNYABhCWAAYAAODTAAMAAwADAAFg1gAGAAHg37AzAAMAAwADAAYABgACYNYAAsDTQNPA1gAEMNPg1LDWAAYABgAGAAYABgAGAAYABgAGAAUg1aDYsGVglhDV0NcQBnDW0NdQ15DWAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAlQCBDZUAiA2PDZcNMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAnw2nDTAAMAAwADAAMAAwAHUArw23DTAAMAAwADAAMAAwADAAMAAwADAAMAB1AL8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAB1AHUAdQB1AHUAdQDHDTAAYABgAM8NMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA1w11ANwNMAAwAD0B5A0wADAAMAAwADAAMADsDfQN/A0EDgwOFA4wABsOMAAwADAAMAAwADAAMAAwANIG0gbSBtIG0gbSBtIG0gYjDigOwQUuDsEFMw7SBjoO0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGQg5KDlIOVg7SBtIGXg5lDm0OdQ7SBtIGfQ6EDooOjQ6UDtIGmg6hDtIG0gaoDqwO0ga0DrwO0gZgAGAAYADEDmAAYAAkBtIGzA5gANIOYADaDokO0gbSBt8O5w7SBu8O0gb1DvwO0gZgAGAAxA7SBtIG0gbSBtIGYABgAGAAYAAED2AAsAUMD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IGLAcsBywHLAcsBywHLAcsBywH
LAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHJA8sBywHLAcsBywHLAccDywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywPLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAc0D9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAccD9IG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIGFA8sBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHLAcsBywHPA/SBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gbSBtIG0gYUD0QPlQCVAJUAMAAwADAAMACVAJUAlQCVAJUAlQCVAEwPMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAA//8EAAQABAAEAAQABAAEAAQABAANAAMAAQABAAIABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQACgATABcAHgAbABoAHgAXABYAEgAeABsAGAAPABgAHABLAEsASwBLAEsASwBLAEsASwBLABgAGAAeAB4AHgATAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABYAGwASAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWAA0AEQAeAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAFAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJABYAGgAbABsAGwAeAB0AHQAeAE8AFwAeAA0AHgAeABoAGwBPAE8ADgBQAB0AHQAdAE8A
TwAXAE8ATwBPABYAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AHgAeAFAATwBAAE8ATwBPAEAATwBQAFAATwBQAB4AHgAeAB4AHgAeAB0AHQAdAB0AHgAdAB4ADgBQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgBQAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAJAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAkACQAJAAkACQAJAAkABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAFAAHgAeAB4AKwArAFAAUABQAFAAGABQACsAKwArACsAHgAeAFAAHgBQAFAAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUAAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAYAA0AKwArAB4AHgAbACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAB4ABAAEAB4ABAAEABMABAArACsAKwArACsAKwArACsAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAKwArACsAKwBWAFYAVgBWAB4AHgArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AGgAaABoAGAAYAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQAEwAEACsAEwATAFAAUABQAFAA
UABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABLAEsASwBLAEsASwBLAEsASwBLABoAGQAZAB4AUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQABMAUAAEAAQABAAEAAQABAAEAB4AHgAEAAQABAAEAAQABABQAFAABAAEAB4ABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUAAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAFAABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQAUABQAB4AHgAYABMAUAArACsABAAbABsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAFAABAAEAAQABAAEAFAABAAEAAQAUAAEAAQABAAEAAQAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArACsAHgArAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAUAAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEAA0ADQBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUAArACsAKwBQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABABQACsAKwArACsAKwArACsAKwAEACsAKwArACsAUABQACsAUABQAFAA
BAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUAAaABoAUABQAFAAUABQAEwAHgAbAFAAHgAEACsAKwAEAAQABAArAFAAUABQAFAAUABQACsAKwArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQACsAUABQACsAKwAEACsABAAEAAQABAAEACsAKwArACsABAAEACsAKwAEAAQABAArACsAKwAEACsAKwArACsAKwArACsAUABQAFAAUAArAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLAAQABABQAFAAUAAEAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsAKwAEAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAArACsAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AGwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAKwArACsAKwArAAQABAAEACsAKwArACsAUABQACsAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAAQAUAArAFAAUABQAFAAUABQACsAKwArAFAAUABQACsAUABQAFAAUAArACsAKwBQAFAAKwBQACsAUABQACsAKwArAFAAUAArACsAKwBQAFAAUAArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArAAQABAAEAAQABAArACsAKwAEAAQABAArAAQABAAEAAQAKwArAFAAKwArACsAKwArACsABAArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAHgAeAB4AHgAeAB4AGwAeACsAKwArACsAKwAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAUABQAFAAKwArACsAKwArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwAOAFAAUABQAFAAUABQAFAAHgBQAAQABAAEAA4AUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAKwArAAQAUAAEAAQABAAEAAQABAAEACsABAAEAAQAKwAEAAQABAAEACsAKwArACsAKwArACsABAAEACsAKwArACsAKwArACsAUAArAFAAUAAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAKwBQAFAA
UABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAFAABAAEAAQABAAEAAQABAArAAQABAAEACsABAAEAAQABABQAB4AKwArACsAKwBQAFAAUAAEAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQABoAUABQAFAAUABQAFAAKwAEAAQABAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQACsAUAArACsAUABQAFAAUABQAFAAUAArACsAKwAEACsAKwArACsABAAEAAQABAAEAAQAKwAEACsABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArAAQABAAeACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAXAAqACoAKgAqACoAKgAqACsAKwArACsAGwBcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAeAEsASwBLAEsASwBLAEsASwBLAEsADQANACsAKwArACsAKwBcAFwAKwBcACsAXABcAFwAXABcACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAXAArAFwAXABcAFwAXABcAFwAXABcAFwAKgBcAFwAKgAqACoAKgAqACoAKgAqACoAXAArACsAXABcAFwAXABcACsAXAArACoAKgAqACoAKgAqACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwBcAFwAXABcAFAADgAOAA4ADgAeAA4ADgAJAA4ADgANAAkAEwATABMAEwATAAkAHgATAB4AHgAeAAQABAAeAB4AHgAeAB4AHgBLAEsASwBLAEsASwBLAEsASwBLAFAAUABQAFAAUABQAFAAUABQAFAADQAEAB4ABAAeAAQAFgARABYAEQAEAAQAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQADQAEAAQABAAEAAQADQAEAAQAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAA0ADQAeAB4AHgAeAB4AHgAEAB4AHgAeAB4AHgAeACsAHgAeAA4ADgANAA4AHgAeAB4AHgAeAAkACQArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgBcAEsASwBLAEsASwBLAEsASwBLAEsADQANAB4AHgAeAB4AXABcAFwAXABcAFwAKgAqACoAKgBcAFwAXABcACoAKgAqAFwAKgAqACoAXABcACoAKgAqACoAKgAqACoAXABcAFwAKgAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKgAqAFwAKgBLAEsASwBLAEsASwBLAEsASwBLACoAKgAqACoA
KgAqAFAAUABQAFAAUABQACsAUAArACsAKwArACsAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAKwBQACsAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsABAAEAAQAHgANAB4AHgAeAB4AHgAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUAArACsADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAWABEAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAA0ADQANAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAANAA0AKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUAArAAQABAArACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqAA0ADQAVAFwADQAeAA0AGwBcACoAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwAeAB4AEwATAA0ADQAOAB4AEwATAB4ABAAEAAQACQArAEsASwBLAEsA
SwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAHgArACsAKwATABMASwBLAEsASwBLAEsASwBLAEsASwBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAXABcAFwAXABcACsAKwArACsAKwArACsAKwArACsAKwBcAFwAXABcAFwAXABcAFwAXABcAFwAXAArACsAKwArAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAXAArACsAKwAqACoAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAArACsAHgAeAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcACoAKgAqACoAKgAqACoAKgAqACoAKwAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKwArAAQASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArACoAKgAqACoAKgAqACoAXAAqACoAKgAqACoAKgArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABABQAFAAUABQAFAAUABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwANAA0AHgANAA0ADQANAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAEAAQABAAEAAQAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwAeAB4AHgAeAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArAA0ADQANAA0ADQBLAEsASwBLAEsASwBLAEsASwBLACsAKwArAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAA
UABQAFAAUABQAFAAUABQAA0ADQBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUAAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArAAQABAAEAB4ABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAAQAUABQAFAAUABQAFAABABQAFAABAAEAAQAUAArACsAKwArACsABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQACsAUAArAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgBQAB4AHgAeAFAAUABQACsAHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQACsAKwAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQACsAHgAeAB4AHgAeAB4AHgAOAB4AKwANAA0ADQANAA0ADQANAAkADQANAA0ACAAEAAsABAAEAA0ACQANAA0ADAAdAB0AHgAXABcAFgAXABcAFwAWABcAHQAdAB4AHgAUABQAFAANAAEAAQAEAAQABAAEAAQACQAaABoAGgAaABoAGgAaABoAHgAXABcAHQAVABUAHgAeAB4AHgAeAB4AGAAWABEAFQAVABUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ADQAeAA0ADQANAA0AHgANAA0ADQAHAB4AHgAeAB4AKwAEAAQABAAEAAQABAAEAAQABAAEAFAAUAArACsATwBQAFAAUABQAFAAHgAeAB4AFgARAE8AUABPAE8ATwBPAFAAUABQAFAAUAAeAB4AHgAWABEAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArABsAGwAbABsAGwAbABsAGgAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGgAbABsAGwAbABoAGwAbABoAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbABsAGwAbAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAHgAeAFAAGgAeAB0AHgBQAB4AGgAeAB4AHgAeAB4AHgAeAB4AHgBPAB4AUAAbAB4AHgBQAFAAUABQAFAAHgAeAB4AHQAdAB4AUAAeAFAAHgBQAB4AUABPAFAAUAAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAAHgBQAFAAUABQAE8ATwBQAFAAUABQAFAATwBQAFAATwBQAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAFAAUABQAFAATwBPAE8ATwBPAE8ATwBPAE8ATwBQAFAAUABQAFAAUABQAFAAUAAeAB4AUABQAFAAUABPAB4AHgArACsAKwArAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB4AHQAdAB4AHgAeAB0AHQAeAB4AHQAeAB4AHgAdAB4AHQAbABsA
HgAdAB4AHgAeAB4AHQAeAB4AHQAdAB0AHQAeAB4AHQAeAB0AHgAdAB0AHQAdAB0AHQAeAB0AHgAeAB4AHgAeAB0AHQAdAB0AHgAeAB4AHgAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB4AHgAeAB0AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHgAeAB0AHQAdAB0AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAeAB4AHgAdAB4AHgAeAB4AHgAeAB4AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABYAEQAWABEAHgAeAB4AHgAeAB4AHQAeAB4AHgAeAB4AHgAeACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAWABEAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAFAAHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAeAB4AHQAdAB0AHQAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB0AHQAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB0AHQAeAB4AHQAdAB4AHgAeAB4AHQAdAB4AHgAeAB4AHQAdAB0AHgAeAB0AHgAeAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlAB4AHQAdAB4AHgAdAB4AHgAeAB4AHQAdAB4AHgAeAB4AJQAlAB0AHQAlAB4AJQAlACUAIAAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAeAB4AHgAeAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHgAdAB0AHQAeAB0AJQAdAB0AHgAdAB0AHgAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHQAdAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUA
JQAlACUAJQAlACUAJQAdAB0AHQAdACUAHgAlACUAJQAdACUAJQAdAB0AHQAlACUAHQAdACUAHQAdACUAJQAlAB4AHQAeAB4AHgAeAB0AHQAlAB0AHQAdAB0AHQAdACUAJQAlACUAJQAdACUAJQAgACUAHQAdACUAJQAlACUAJQAlACUAJQAeAB4AHgAlACUAIAAgACAAIAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB0AHgAeAB4AFwAXABcAFwAXABcAHgATABMAJQAeAB4AHgAWABEAFgARABYAEQAWABEAFgARABYAEQAWABEATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABYAEQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAWABEAFgARABYAEQAWABEAFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFgARABYAEQAWABEAFgARABYAEQAWABEAFgARABYAEQAWABEAFgARABYAEQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAWABEAFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AFgARAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAdAB0AHQAdAB0AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUABQAFAAUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAEAAQABAAeAB4AKwArACsAKwArABMADQANAA0AUAATAA0AUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAUAANACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAA0ADQANAA0ADQANAA0ADQAeAA0AFgANAB4AHgAXABcAHgAeABcAFwAWABEAFgARABYAEQAWABEADQANAA0ADQATAFAADQANAB4ADQANAB4AHgAeAB4AHgAMAAwADQANAA0AHgANAA0AFgANAA0ADQANAA0ADQANAA0AHgANAB4ADQANAB4AHgAeACsAKwArACsAKwArACsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwArACsAKwArACsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArACsAKwArACsAKwArACsAKwArACsA
KwArACsAKwAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArAA0AEQARACUAJQBHAFcAVwAWABEAFgARABYAEQAWABEAFgARACUAJQAWABEAFgARABYAEQAWABEAFQAWABEAEQAlAFcAVwBXAFcAVwBXAFcAVwBXAAQABAAEAAQABAAEACUAVwBXAFcAVwA2ACUAJQBXAFcAVwBHAEcAJQAlACUAKwBRAFcAUQBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFEAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBRAFcAUQBXAFEAVwBXAFcAVwBXAFcAUQBXAFcAVwBXAFcAVwBRAFEAKwArAAQABAAVABUARwBHAFcAFQBRAFcAUQBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFEAVwBRAFcAUQBXAFcAVwBXAFcAVwBRAFcAVwBXAFcAVwBXAFEAUQBXAFcAVwBXABUAUQBHAEcAVwArACsAKwArACsAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwAlACUAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACsAKwArACsAKwArACsAKwArACsAKwArAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAUQBRAFEAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBPAE8ATwBPAE8ATwBPAE8AJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQAlAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAEcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAADQATAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABLAEsASwBLAEsASwBLAEsASwBLAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAFAABAAEAAQABAAeAAQABAAEAAQABAAEAAQABAAEAAQAHgBQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUABQAAQABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAeAA0ADQANAA0ADQArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4A
HgAeAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AUAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAB4AHgAeAB4AHgAeAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAHgAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAeAB4AUABQAFAAUABQAFAAUABQAFAAUABQAAQAUABQAFAABABQAFAAUABQAAQAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAeAB4AHgAeAAQAKwArACsAUABQAFAAUABQAFAAHgAeABoAHgArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAADgAOABMAEwArACsAKwArACsAKwArACsABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwANAA0ASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAFAAUAAeAB4AHgBQAA4AUABQAAQAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAA0ADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArAB4AWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYAFgAWABYACsAKwArAAQAHgAeAB4AHgAeAB4ADQANAA0AHgAeAB4AHgArAFAASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArAB4AHgBcAFwAXABcAFwAKgBcAFwAXABcAFwAXABcAFwAXABcAEsASwBLAEsASwBLAEsASwBLAEsAXABcAFwAXABcACsAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArAFAAUABQAAQAUABQAFAAUABQAFAAUABQAAQABAArACsASwBLAEsASwBLAEsASwBLAEsASwArACsAHgANAA0ADQBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKgAqACoAXAAqACoAKgBcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXAAqAFwAKgAqACoAXABcACoAKgBcAFwAXABcAFwAKgAqAFwAKgBcACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFwAXABcACoAKgBQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAA0ADQBQAFAAUAAEAAQAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUAArACsAUABQAFAAUABQAFAAKwArAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQA
BAAEAAQADQAEAAQAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAVABVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBUAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVAFUAVQBVACsAKwArACsAKwArACsAKwArACsAKwArAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAWQBZAFkAKwArACsAKwBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAWgBaAFoAKwArACsAKwAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYABgAGAAYAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAKwArACsAKwArAFYABABWAFYAVgBWAFYAVgBWAFYAVgBWAB4AVgBWAFYAVgBWAFYAVgBWAFYAVgBWAFYAVgArAFYAVgBWAFYAVgArAFYAKwBWAFYAKwBWAFYAKwBWAFYAVgBWAFYAVgBWAFYAVgBWAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAEQAWAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAaAB4AKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAGAARABEAGAAYABMAEwAWABEAFAArACsAKwArACsAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACUAJQAlACUAJQAWABEAFgARABYAEQAWABEAFgARABYAEQAlACUAFgARACUAJQAlACUAJQAlACUAEQAlABEAKwAVABUAEwATACUAFgARABYAEQAWABEAJQAlACUAJQAlACUAJQAlACsAJQAbABoAJQArACsAKwArAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAcAKwATACUAJQAbABoAJQAlABYAEQAlACUAEQAlABEAJQBXAFcAVwBXAFcAVwBXAFcAVwBXABUAFQAlACUAJQATACUAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXABYAJQARACUAJQAlAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAWACUAEQAlABYAEQARABYAEQARABUAVwBRAFEAUQBRAFEAUQBRAFEAUQBRAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcA
VwBXAFcAVwBXAFcAVwBXAEcARwArACsAVwBXAFcAVwBXAFcAKwArAFcAVwBXAFcAVwBXACsAKwBXAFcAVwBXAFcAVwArACsAVwBXAFcAKwArACsAGgAbACUAJQAlABsAGwArAB4AHgAeAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwAEAAQABAAQAB0AKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsADQANAA0AKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAB4AHgAeAB4AHgAeAB4AHgAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAAQAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAA0AUABQAFAAUAArACsAKwArAFAAUABQAFAAUABQAFAAUAANAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwAeACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAKwArAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUAArACsAKwBQACsAKwBQAFAAUABQAFAA
UABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwANAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAB4AUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAUABQAFAAUABQAAQABAAEACsABAAEACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAKwBQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEACsAKwArACsABABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAA0ADQANAA0ADQANAA0ADQAeACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAQABAArACsAKwArAFAAUABQAFAAUAANAA0ADQANAA0ADQAUACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsADQANAA0ADQANAA0ADQBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAB4AHgAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArAFAAUABQAFAAUABQAAQABAAEAAQAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArAAQABAANACsAKwBQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAAQABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAB4AHgAeAB4AHgArACsAKwArACsAKwAEAAQABAAEAAQABAAEAA0ADQAeAB4AHgAeAB4AKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAA
UABQAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgANAA0ADQANACsAKwArACsAKwArACsAKwArACsAKwAeACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsASwBLACsAKwArACsAKwArAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEACsASwBLAEsASwBLAEsASwBLAEsASwANAA0ADQANAFAABAAEAFAAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAeAA4AUAArACsAKwArACsAKwArACsAKwAEAFAAUABQAFAADQANAB4ADQAEAAQABAAEAB4ABAAEAEsASwBLAEsASwBLAEsASwBLAEsAUAAOAFAADQANAA0AKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAANAA0AHgANAA0AHgAEACsAUABQAFAAUABQAFAAUAArAFAAKwBQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAA0AKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsABAAEAAQABAArAFAAUABQAFAAUABQAFAAUAArACsAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQACsABAAEAFAABAAEAAQABAAEAAQABAArACsABAAEACsAKwAEAAQABAArACsAUAArACsAKwArACsAKwAEACsAKwArACsAKwBQAFAAUABQAFAABAAEACsAKwAEAAQABAAEAAQABAAEACsAKwArAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwArACsABAAEAAQABAAEAAQABABQAFAAUABQAA0ADQANAA0AHgBLAEsASwBLAEsASwBLAEsASwBLAA0ADQArAB4ABABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAFAAUAAeAFAAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAArACsABAAEAAQABAAEAAQABAAEAAQADgANAA0AEwATAB4AHgAeAA0ADQANAA0ADQANAA0ADQANAA0ADQANAA0ADQANAFAAUABQAFAABAAEACsAKwAEAA0ADQAeAFAAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAFAAKwArACsAKwArACsAKwBLAEsASwBLAEsASwBLAEsA
SwBLACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAXABcAFwAKwArACoAKgAqACoAKgAqACoAKgAqACoAKgAqACoAKgAqACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBcAFwADQANAA0AKgBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAKwArAFAAKwArAFAAUABQAFAAUABQAFAAUAArAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQAKwAEAAQAKwArAAQABAAEAAQAUAAEAFAABAAEAA0ADQANACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAArACsABAAEAAQABAAEAAQABABQAA4AUAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAABAAEAAQABAAEAAQABAAEAAQABABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAFAABAAEAAQABAAOAB4ADQANAA0ADQAOAB4ABAArACsAKwArACsAKwArACsAUAAEAAQABAAEAAQABAAEAAQABAAEAAQAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAA0ADQANAFAADgAOAA4ADQANACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAEAAQABAAEACsABAAEAAQABAAEAAQABAAEAFAADQANAA0ADQANACsAKwArACsAKwArACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAOABMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQACsAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAArACsAKwAEACsABAAEACsABAAEAAQABAAEAAQABABQAAQAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAUABQAFAAUABQAFAAKwBQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQAKwAEAAQAKwAEAAQABAAEAAQAUAArACsAKwArACsA
KwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAABAAEAAQABAAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAaABoAGgAaAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArAA0AUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsADQANAA0ADQANACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAASABIAEgAQwBDAEMAUABQAFAAUABDAFAAUABQAEgAQwBIAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAASABDAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwAJAAkACQAJAAkACQAJABYAEQArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABIAEMAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwANAA0AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArAAQABAAEAAQABAANACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEAA0ADQANAB4AHgAeAB4AHgAeAFAAUABQAFAADQAeACsAKwArACsAKwArACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwArAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAANAA0AHgAeACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwAEAFAABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwAEAAQABAAEAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAARwBHABUARwAJACsAKwArACsAKwArACsAKwArACsAKwAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACsAKwArACsAKwArACsAKwBXAFcAVwBXAFcAVwBXAFcAVwArACsAKwArACsAKwArACsAKwArACsA
KwArACsAKwArACsAKwArACsAKwArACsAUQBRAFEAKwArACsAKwArACsAKwArACsAKwArACsAKwBRAFEAUQBRACsAKwArACsAKwArACsAKwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUAArACsAHgAEAAQADQAEAAQABAAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArAB4AHgAeAB4AHgAeAB4AKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAAQABAAEAAQABAAeAB4AHgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAB4AHgAEAAQABAAEAAQABAAEAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4ABAAEAAQAHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwArACsAKwArACsAKwArACsAKwArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwBQAFAAKwArAFAAKwArAFAAUAArACsAUABQAFAAUAArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACsAUAArAFAAUABQAFAAUABQAFAAKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwBQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAHgAeAFAAUABQAFAAUAArAFAAKwArACsAUABQAFAAUABQAFAAUAArAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAB4AHgAeAB4AHgAeAB4AHgAeACsAKwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAEsA
SwBLAEsASwBLAEsASwBLAEsASwBLAEsASwBLAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAeAB4AHgAeAB4AHgAeAB4ABAAeAB4AHgAeAB4AHgAeAB4AHgAeAAQAHgAeAA0ADQANAA0AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAEAAQABAAEAAQAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQAKwAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArAAQABAAEAAQABAAEAAQAKwAEAAQAKwAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwAEAAQABAAEAAQABAAEAFAAUABQAFAAUABQAFAAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwBQAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArABsAUABQAFAAUABQACsAKwBQAFAAUABQAFAAUABQAFAAUAAEAAQABAAEAAQABAAEACsAKwArACsAKwArACsAKwArAB4AHgAeAB4ABAAEAAQABAAEAAQABABQACsAKwArACsASwBLAEsASwBLAEsASwBLAEsASwArACsAKwArABYAFgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAGgBQAFAAUAAaAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAeAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQACsAKwBQAFAAUABQACsAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwBQAFAAKwBQACsAKwBQACsAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAKwBQACsAUAArACsAKwArACsAKwBQACsAKwArACsAUAArAFAAKwBQACsAUABQAFAAKwBQAFAAKwBQACsAKwBQACsAUAArAFAAKwBQACsAUAArAFAAUAArAFAAKwArAFAAUABQAFAAKwBQAFAAUABQAFAAUABQACsAUABQAFAAUAArAFAAUABQAFAAKwBQACsAUABQAFAAUABQAFAAUABQAFAAUAArAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAArACsAKwArACsAUABQAFAAKwBQAFAAUABQAFAAKwBQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwAeAB4AKwArACsAKwArACsAKwArACsAKwArACsAKwArAE8ATwBPAE8ATwBPAE8ATwBPAE8ATwBPAE8AJQAlACUAHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHgAeAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB4AHgAeACUAJQAlAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAdAB0AHQAlACUAJQAlACUAJQAlACUAJQAlACUA
JQAlACUAJQAlACUAJQAlACUAJQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAKQApACkAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlAB4AHgAlACUAJQAlACUAHgAlACUAJQAlACUAIAAgACAAJQAlACAAJQAlACAAIAAgACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACEAIQAhACEAIQAlACUAIAAgACUAJQAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlACUAIAAlACUAJQAlACAAIAAgACUAIAAgACAAJQAlACUAJQAlACUAJQAgACUAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAlAB4AJQAeACUAJQAlACUAJQAgACUAJQAlACUAHgAlAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAgACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACAAIAAgACUAJQAlACAAIAAgACAAIAAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeABcAFwAXABUAFQAVAB4AHgAeAB4AJQAlACUAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAgACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlACUAJQAeAB4AHgAeAB4AHgAeAB4AHgAeACUAJQAlACUAJQAlAB4AHgAeAB4AHgAeAB4AHgAlACUAJQAlACUAJQAlACUAHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAgACUAJQAgACUAJQAlACUAJQAlACUAJQAgACAAIAAgACAAIAAgACAAJQAlACUAJQAlACUAIAAlACUAJQAlACUAJQAlACUAJQAgACAAIAAgACAAIAAgACAAIAAgACUAJQAgACAAIAAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUA
JQAgACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACAAIAAlACAAIAAlACAAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAgACAAIAAlACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAJQAlAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AKwAeAB4AHgAeAB4AHgAeAB4AHgAeAB4AHgArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAEsASwBLAEsASwBLAEsASwBLAEsAKwArACsAKwArACsAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwArAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwAlACUAJQAlACUAJQAlACUAJQAlACUAVwBXACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQBXAFcAVwBXAFcAVwBXAFcAVwBXAFcAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAJQAlACUAKwAEACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArACsAKwArAA=='; - - var LETTER_NUMBER_MODIFIER = 50; - // Non-tailorable Line Breaking Classes - var BK = 1; // Cause a line break (after) - var CR$1 = 2; // Cause a line break (after), except between CR and LF - var LF$1 = 3; // Cause a line break (after) - var CM = 4; // Prohibit a line break between the character and the preceding character - var NL = 5; // Cause a line break (after) - var WJ = 7; // Prohibit line breaks before and after - var ZW = 8; // Provide a break opportunity - var GL = 9; // Prohibit line breaks before and after - var SP = 10; // Enable indirect line breaks - var ZWJ$1 = 11; // Prohibit line breaks within joiner sequences - // Break Opportunities - var B2 = 12; // Provide a line break opportunity before and after the character - var BA = 13; // Generally provide a line break opportunity after the character - var BB = 14; // Generally provide a line break opportunity before the character - var HY = 15; // Provide a line break opportunity 
after the character, except in numeric context - var CB = 16; // Provide a line break opportunity contingent on additional information - // Characters Prohibiting Certain Breaks - var CL = 17; // Prohibit line breaks before - var CP = 18; // Prohibit line breaks before - var EX = 19; // Prohibit line breaks before - var IN = 20; // Allow only indirect line breaks between pairs - var NS = 21; // Allow only indirect line breaks before - var OP = 22; // Prohibit line breaks after - var QU = 23; // Act like they are both opening and closing - // Numeric Context - var IS = 24; // Prevent breaks after any and before numeric - var NU = 25; // Form numeric expressions for line breaking purposes - var PO = 26; // Do not break following a numeric expression - var PR = 27; // Do not break in front of a numeric expression - var SY = 28; // Prevent a break before; and allow a break after - // Other Characters - var AI = 29; // Act like AL when the resolvedEAW is N; otherwise; act as ID - var AL = 30; // Are alphabetic characters or symbols that are used with alphabetic characters - var CJ = 31; // Treat as NS or ID for strict or normal breaking. - var EB = 32; // Do not break from following Emoji Modifier - var EM = 33; // Do not break from preceding Emoji Base - var H2 = 34; // Form Korean syllable blocks - var H3 = 35; // Form Korean syllable blocks - var HL = 36; // Do not break around a following hyphen; otherwise act as Alphabetic - var ID = 37; // Break before or after; except in some numeric context - var JL = 38; // Form Korean syllable blocks - var JV = 39; // Form Korean syllable blocks - var JT = 40; // Form Korean syllable blocks - var RI$1 = 41; // Keep pairs together. 
For pairs; break before and after other classes - var SA = 42; // Provide a line break opportunity contingent on additional, language-specific context analysis - var XX = 43; // Have as yet unknown line breaking behavior or unassigned code positions - var ea_OP = [0x2329, 0xff08]; - var BREAK_MANDATORY = '!'; - var BREAK_NOT_ALLOWED$1 = '×'; - var BREAK_ALLOWED$1 = '÷'; - var UnicodeTrie$1 = createTrieFromBase64$1(base64$1); - var ALPHABETICS = [AL, HL]; - var HARD_LINE_BREAKS = [BK, CR$1, LF$1, NL]; - var SPACE$1 = [SP, ZW]; - var PREFIX_POSTFIX = [PR, PO]; - var LINE_BREAKS = HARD_LINE_BREAKS.concat(SPACE$1); - var KOREAN_SYLLABLE_BLOCK = [JL, JV, JT, H2, H3]; - var HYPHEN = [HY, BA]; - var codePointsToCharacterClasses = function (codePoints, lineBreak) { - if (lineBreak === void 0) { lineBreak = 'strict'; } - var types = []; - var indices = []; - var categories = []; - codePoints.forEach(function (codePoint, index) { - var classType = UnicodeTrie$1.get(codePoint); - if (classType > LETTER_NUMBER_MODIFIER) { - categories.push(true); - classType -= LETTER_NUMBER_MODIFIER; - } - else { - categories.push(false); - } - if (['normal', 'auto', 'loose'].indexOf(lineBreak) !== -1) { - // U+2010, – U+2013, 〜 U+301C, ゠ U+30A0 - if ([0x2010, 0x2013, 0x301c, 0x30a0].indexOf(codePoint) !== -1) { - indices.push(index); - return types.push(CB); - } - } - if (classType === CM || classType === ZWJ$1) { - // LB10 Treat any remaining combining mark or ZWJ as AL. - if (index === 0) { - indices.push(index); - return types.push(AL); - } - // LB9 Do not break a combining character sequence; treat it as if it has the line breaking class of - // the base character in all of the following rules. Treat ZWJ as if it were CM. 
- var prev = types[index - 1]; - if (LINE_BREAKS.indexOf(prev) === -1) { - indices.push(indices[index - 1]); - return types.push(prev); - } - indices.push(index); - return types.push(AL); - } - indices.push(index); - if (classType === CJ) { - return types.push(lineBreak === 'strict' ? NS : ID); - } - if (classType === SA) { - return types.push(AL); - } - if (classType === AI) { - return types.push(AL); - } - // For supplementary characters, a useful default is to treat characters in the range 10000..1FFFD as AL - // and characters in the ranges 20000..2FFFD and 30000..3FFFD as ID, until the implementation can be revised - // to take into account the actual line breaking properties for these characters. - if (classType === XX) { - if ((codePoint >= 0x20000 && codePoint <= 0x2fffd) || (codePoint >= 0x30000 && codePoint <= 0x3fffd)) { - return types.push(ID); - } - else { - return types.push(AL); - } - } - types.push(classType); - }); - return [indices, types, categories]; - }; - var isAdjacentWithSpaceIgnored = function (a, b, currentIndex, classTypes) { - var current = classTypes[currentIndex]; - if (Array.isArray(a) ? a.indexOf(current) !== -1 : a === current) { - var i = currentIndex; - while (i <= classTypes.length) { - i++; - var next = classTypes[i]; - if (next === b) { - return true; - } - if (next !== SP) { - break; - } - } - } - if (current === SP) { - var i = currentIndex; - while (i > 0) { - i--; - var prev = classTypes[i]; - if (Array.isArray(a) ? 
a.indexOf(prev) !== -1 : a === prev) { - var n = currentIndex; - while (n <= classTypes.length) { - n++; - var next = classTypes[n]; - if (next === b) { - return true; - } - if (next !== SP) { - break; - } - } - } - if (prev !== SP) { - break; - } - } - } - return false; - }; - var previousNonSpaceClassType = function (currentIndex, classTypes) { - var i = currentIndex; - while (i >= 0) { - var type = classTypes[i]; - if (type === SP) { - i--; - } - else { - return type; - } - } - return 0; - }; - var _lineBreakAtIndex = function (codePoints, classTypes, indicies, index, forbiddenBreaks) { - if (indicies[index] === 0) { - return BREAK_NOT_ALLOWED$1; - } - var currentIndex = index - 1; - if (Array.isArray(forbiddenBreaks) && forbiddenBreaks[currentIndex] === true) { - return BREAK_NOT_ALLOWED$1; - } - var beforeIndex = currentIndex - 1; - var afterIndex = currentIndex + 1; - var current = classTypes[currentIndex]; - // LB4 Always break after hard line breaks. - // LB5 Treat CR followed by LF, as well as CR, LF, and NL as hard line breaks. - var before = beforeIndex >= 0 ? classTypes[beforeIndex] : 0; - var next = classTypes[afterIndex]; - if (current === CR$1 && next === LF$1) { - return BREAK_NOT_ALLOWED$1; - } - if (HARD_LINE_BREAKS.indexOf(current) !== -1) { - return BREAK_MANDATORY; - } - // LB6 Do not break before hard line breaks. - if (HARD_LINE_BREAKS.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB7 Do not break before spaces or zero width space. - if (SPACE$1.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB8 Break before any character following a zero-width space, even if one or more spaces intervene. - if (previousNonSpaceClassType(currentIndex, classTypes) === ZW) { - return BREAK_ALLOWED$1; - } - // LB8a Do not break after a zero width joiner. 
- if (UnicodeTrie$1.get(codePoints[currentIndex]) === ZWJ$1) { - return BREAK_NOT_ALLOWED$1; - } - // zwj emojis - if ((current === EB || current === EM) && UnicodeTrie$1.get(codePoints[afterIndex]) === ZWJ$1) { - return BREAK_NOT_ALLOWED$1; - } - // LB11 Do not break before or after Word joiner and related characters. - if (current === WJ || next === WJ) { - return BREAK_NOT_ALLOWED$1; - } - // LB12 Do not break after NBSP and related characters. - if (current === GL) { - return BREAK_NOT_ALLOWED$1; - } - // LB12a Do not break before NBSP and related characters, except after spaces and hyphens. - if ([SP, BA, HY].indexOf(current) === -1 && next === GL) { - return BREAK_NOT_ALLOWED$1; - } - // LB13 Do not break before ‘]’ or ‘!’ or ‘;’ or ‘/’, even after spaces. - if ([CL, CP, EX, IS, SY].indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB14 Do not break after ‘[’, even after spaces. - if (previousNonSpaceClassType(currentIndex, classTypes) === OP) { - return BREAK_NOT_ALLOWED$1; - } - // LB15 Do not break within ‘”[’, even with intervening spaces. - if (isAdjacentWithSpaceIgnored(QU, OP, currentIndex, classTypes)) { - return BREAK_NOT_ALLOWED$1; - } - // LB16 Do not break between closing punctuation and a nonstarter (lb=NS), even with intervening spaces. - if (isAdjacentWithSpaceIgnored([CL, CP], NS, currentIndex, classTypes)) { - return BREAK_NOT_ALLOWED$1; - } - // LB17 Do not break within ‘——’, even with intervening spaces. - if (isAdjacentWithSpaceIgnored(B2, B2, currentIndex, classTypes)) { - return BREAK_NOT_ALLOWED$1; - } - // LB18 Break after spaces. - if (current === SP) { - return BREAK_ALLOWED$1; - } - // LB19 Do not break before or after quotation marks, such as ‘ ” ’. - if (current === QU || next === QU) { - return BREAK_NOT_ALLOWED$1; - } - // LB20 Break before and after unresolved CB. 
- if (next === CB || current === CB) { - return BREAK_ALLOWED$1; - } - // LB21 Do not break before hyphen-minus, other hyphens, fixed-width spaces, small kana, and other non-starters, or after acute accents. - if ([BA, HY, NS].indexOf(next) !== -1 || current === BB) { - return BREAK_NOT_ALLOWED$1; - } - // LB21a Don't break after Hebrew + Hyphen. - if (before === HL && HYPHEN.indexOf(current) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB21b Don’t break between Solidus and Hebrew letters. - if (current === SY && next === HL) { - return BREAK_NOT_ALLOWED$1; - } - // LB22 Do not break before ellipsis. - if (next === IN) { - return BREAK_NOT_ALLOWED$1; - } - // LB23 Do not break between digits and letters. - if ((ALPHABETICS.indexOf(next) !== -1 && current === NU) || (ALPHABETICS.indexOf(current) !== -1 && next === NU)) { - return BREAK_NOT_ALLOWED$1; - } - // LB23a Do not break between numeric prefixes and ideographs, or between ideographs and numeric postfixes. - if ((current === PR && [ID, EB, EM].indexOf(next) !== -1) || - ([ID, EB, EM].indexOf(current) !== -1 && next === PO)) { - return BREAK_NOT_ALLOWED$1; - } - // LB24 Do not break between numeric prefix/postfix and letters, or between letters and prefix/postfix. - if ((ALPHABETICS.indexOf(current) !== -1 && PREFIX_POSTFIX.indexOf(next) !== -1) || - (PREFIX_POSTFIX.indexOf(current) !== -1 && ALPHABETICS.indexOf(next) !== -1)) { - return BREAK_NOT_ALLOWED$1; - } - // LB25 Do not break between the following pairs of classes relevant to numbers: - if ( - // (PR | PO) × ( OP | HY )? 
NU - ([PR, PO].indexOf(current) !== -1 && - (next === NU || ([OP, HY].indexOf(next) !== -1 && classTypes[afterIndex + 1] === NU))) || - // ( OP | HY ) × NU - ([OP, HY].indexOf(current) !== -1 && next === NU) || - // NU × (NU | SY | IS) - (current === NU && [NU, SY, IS].indexOf(next) !== -1)) { - return BREAK_NOT_ALLOWED$1; - } - // NU (NU | SY | IS)* × (NU | SY | IS | CL | CP) - if ([NU, SY, IS, CL, CP].indexOf(next) !== -1) { - var prevIndex = currentIndex; - while (prevIndex >= 0) { - var type = classTypes[prevIndex]; - if (type === NU) { - return BREAK_NOT_ALLOWED$1; - } - else if ([SY, IS].indexOf(type) !== -1) { - prevIndex--; - } - else { - break; - } - } - } - // NU (NU | SY | IS)* (CL | CP)? × (PO | PR)) - if ([PR, PO].indexOf(next) !== -1) { - var prevIndex = [CL, CP].indexOf(current) !== -1 ? beforeIndex : currentIndex; - while (prevIndex >= 0) { - var type = classTypes[prevIndex]; - if (type === NU) { - return BREAK_NOT_ALLOWED$1; - } - else if ([SY, IS].indexOf(type) !== -1) { - prevIndex--; - } - else { - break; - } - } - } - // LB26 Do not break a Korean syllable. - if ((JL === current && [JL, JV, H2, H3].indexOf(next) !== -1) || - ([JV, H2].indexOf(current) !== -1 && [JV, JT].indexOf(next) !== -1) || - ([JT, H3].indexOf(current) !== -1 && next === JT)) { - return BREAK_NOT_ALLOWED$1; - } - // LB27 Treat a Korean Syllable Block the same as ID. - if ((KOREAN_SYLLABLE_BLOCK.indexOf(current) !== -1 && [IN, PO].indexOf(next) !== -1) || - (KOREAN_SYLLABLE_BLOCK.indexOf(next) !== -1 && current === PR)) { - return BREAK_NOT_ALLOWED$1; - } - // LB28 Do not break between alphabetics (“at”). - if (ALPHABETICS.indexOf(current) !== -1 && ALPHABETICS.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB29 Do not break between numeric punctuation and alphabetics (“e.g.”). 
- if (current === IS && ALPHABETICS.indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED$1; - } - // LB30 Do not break between letters, numbers, or ordinary symbols and opening or closing parentheses. - if ((ALPHABETICS.concat(NU).indexOf(current) !== -1 && - next === OP && - ea_OP.indexOf(codePoints[afterIndex]) === -1) || - (ALPHABETICS.concat(NU).indexOf(next) !== -1 && current === CP)) { - return BREAK_NOT_ALLOWED$1; - } - // LB30a Break between two regional indicator symbols if and only if there are an even number of regional - // indicators preceding the position of the break. - if (current === RI$1 && next === RI$1) { - var i = indicies[currentIndex]; - var count = 1; - while (i > 0) { - i--; - if (classTypes[i] === RI$1) { - count++; - } - else { - break; - } - } - if (count % 2 !== 0) { - return BREAK_NOT_ALLOWED$1; - } - } - // LB30b Do not break between an emoji base and an emoji modifier. - if (current === EB && next === EM) { - return BREAK_NOT_ALLOWED$1; - } - return BREAK_ALLOWED$1; - }; - var cssFormattedClasses = function (codePoints, options) { - if (!options) { - options = { lineBreak: 'normal', wordBreak: 'normal' }; - } - var _a = codePointsToCharacterClasses(codePoints, options.lineBreak), indicies = _a[0], classTypes = _a[1], isLetterNumber = _a[2]; - if (options.wordBreak === 'break-all' || options.wordBreak === 'break-word') { - classTypes = classTypes.map(function (type) { return ([NU, AL, SA].indexOf(type) !== -1 ? ID : type); }); - } - var forbiddenBreakpoints = options.wordBreak === 'keep-all' - ? 
isLetterNumber.map(function (letterNumber, i) { - return letterNumber && codePoints[i] >= 0x4e00 && codePoints[i] <= 0x9fff; - }) - : undefined; - return [indicies, classTypes, forbiddenBreakpoints]; - }; - var Break = /** @class */ (function () { - function Break(codePoints, lineBreak, start, end) { - this.codePoints = codePoints; - this.required = lineBreak === BREAK_MANDATORY; - this.start = start; - this.end = end; - } - Break.prototype.slice = function () { - return fromCodePoint$1.apply(void 0, this.codePoints.slice(this.start, this.end)); - }; - return Break; - }()); - var LineBreaker = function (str, options) { - var codePoints = toCodePoints$1(str); - var _a = cssFormattedClasses(codePoints, options), indicies = _a[0], classTypes = _a[1], forbiddenBreakpoints = _a[2]; - var length = codePoints.length; - var lastEnd = 0; - var nextIndex = 0; - return { - next: function () { - if (nextIndex >= length) { - return { done: true, value: null }; - } - var lineBreak = BREAK_NOT_ALLOWED$1; - while (nextIndex < length && - (lineBreak = _lineBreakAtIndex(codePoints, classTypes, indicies, ++nextIndex, forbiddenBreakpoints)) === - BREAK_NOT_ALLOWED$1) { } - if (lineBreak !== BREAK_NOT_ALLOWED$1 || nextIndex === length) { - var value = new Break(codePoints, lineBreak, lastEnd, nextIndex); - lastEnd = nextIndex; - return { value: value, done: false }; - } - return { done: true, value: null }; - }, - }; - }; - - // https://www.w3.org/TR/css-syntax-3 - var FLAG_UNRESTRICTED = 1 << 0; - var FLAG_ID = 1 << 1; - var FLAG_INTEGER = 1 << 2; - var FLAG_NUMBER = 1 << 3; - var LINE_FEED = 0x000a; - var SOLIDUS = 0x002f; - var REVERSE_SOLIDUS = 0x005c; - var CHARACTER_TABULATION = 0x0009; - var SPACE = 0x0020; - var QUOTATION_MARK = 0x0022; - var EQUALS_SIGN = 0x003d; - var NUMBER_SIGN = 0x0023; - var DOLLAR_SIGN = 0x0024; - var PERCENTAGE_SIGN = 0x0025; - var APOSTROPHE = 0x0027; - var LEFT_PARENTHESIS = 0x0028; - var RIGHT_PARENTHESIS = 0x0029; - var LOW_LINE = 0x005f; - var 
HYPHEN_MINUS = 0x002d; - var EXCLAMATION_MARK = 0x0021; - var LESS_THAN_SIGN = 0x003c; - var GREATER_THAN_SIGN = 0x003e; - var COMMERCIAL_AT = 0x0040; - var LEFT_SQUARE_BRACKET = 0x005b; - var RIGHT_SQUARE_BRACKET = 0x005d; - var CIRCUMFLEX_ACCENT = 0x003d; - var LEFT_CURLY_BRACKET = 0x007b; - var QUESTION_MARK = 0x003f; - var RIGHT_CURLY_BRACKET = 0x007d; - var VERTICAL_LINE = 0x007c; - var TILDE = 0x007e; - var CONTROL = 0x0080; - var REPLACEMENT_CHARACTER = 0xfffd; - var ASTERISK = 0x002a; - var PLUS_SIGN = 0x002b; - var COMMA = 0x002c; - var COLON = 0x003a; - var SEMICOLON = 0x003b; - var FULL_STOP = 0x002e; - var NULL = 0x0000; - var BACKSPACE = 0x0008; - var LINE_TABULATION = 0x000b; - var SHIFT_OUT = 0x000e; - var INFORMATION_SEPARATOR_ONE = 0x001f; - var DELETE = 0x007f; - var EOF = -1; - var ZERO = 0x0030; - var a = 0x0061; - var e = 0x0065; - var f = 0x0066; - var u = 0x0075; - var z = 0x007a; - var A = 0x0041; - var E = 0x0045; - var F = 0x0046; - var U = 0x0055; - var Z = 0x005a; - var isDigit = function (codePoint) { return codePoint >= ZERO && codePoint <= 0x0039; }; - var isSurrogateCodePoint = function (codePoint) { return codePoint >= 0xd800 && codePoint <= 0xdfff; }; - var isHex = function (codePoint) { - return isDigit(codePoint) || (codePoint >= A && codePoint <= F) || (codePoint >= a && codePoint <= f); - }; - var isLowerCaseLetter = function (codePoint) { return codePoint >= a && codePoint <= z; }; - var isUpperCaseLetter = function (codePoint) { return codePoint >= A && codePoint <= Z; }; - var isLetter = function (codePoint) { return isLowerCaseLetter(codePoint) || isUpperCaseLetter(codePoint); }; - var isNonASCIICodePoint = function (codePoint) { return codePoint >= CONTROL; }; - var isWhiteSpace = function (codePoint) { - return codePoint === LINE_FEED || codePoint === CHARACTER_TABULATION || codePoint === SPACE; - }; - var isNameStartCodePoint = function (codePoint) { - return isLetter(codePoint) || isNonASCIICodePoint(codePoint) || 
codePoint === LOW_LINE; - }; - var isNameCodePoint = function (codePoint) { - return isNameStartCodePoint(codePoint) || isDigit(codePoint) || codePoint === HYPHEN_MINUS; - }; - var isNonPrintableCodePoint = function (codePoint) { - return ((codePoint >= NULL && codePoint <= BACKSPACE) || - codePoint === LINE_TABULATION || - (codePoint >= SHIFT_OUT && codePoint <= INFORMATION_SEPARATOR_ONE) || - codePoint === DELETE); - }; - var isValidEscape = function (c1, c2) { - if (c1 !== REVERSE_SOLIDUS) { - return false; - } - return c2 !== LINE_FEED; - }; - var isIdentifierStart = function (c1, c2, c3) { - if (c1 === HYPHEN_MINUS) { - return isNameStartCodePoint(c2) || isValidEscape(c2, c3); - } - else if (isNameStartCodePoint(c1)) { - return true; - } - else if (c1 === REVERSE_SOLIDUS && isValidEscape(c1, c2)) { - return true; - } - return false; - }; - var isNumberStart = function (c1, c2, c3) { - if (c1 === PLUS_SIGN || c1 === HYPHEN_MINUS) { - if (isDigit(c2)) { - return true; - } - return c2 === FULL_STOP && isDigit(c3); - } - if (c1 === FULL_STOP) { - return isDigit(c2); - } - return isDigit(c1); - }; - var stringToNumber = function (codePoints) { - var c = 0; - var sign = 1; - if (codePoints[c] === PLUS_SIGN || codePoints[c] === HYPHEN_MINUS) { - if (codePoints[c] === HYPHEN_MINUS) { - sign = -1; - } - c++; - } - var integers = []; - while (isDigit(codePoints[c])) { - integers.push(codePoints[c++]); - } - var int = integers.length ? parseInt(fromCodePoint$1.apply(void 0, integers), 10) : 0; - if (codePoints[c] === FULL_STOP) { - c++; - } - var fraction = []; - while (isDigit(codePoints[c])) { - fraction.push(codePoints[c++]); - } - var fracd = fraction.length; - var frac = fracd ? 
parseInt(fromCodePoint$1.apply(void 0, fraction), 10) : 0; - if (codePoints[c] === E || codePoints[c] === e) { - c++; - } - var expsign = 1; - if (codePoints[c] === PLUS_SIGN || codePoints[c] === HYPHEN_MINUS) { - if (codePoints[c] === HYPHEN_MINUS) { - expsign = -1; - } - c++; - } - var exponent = []; - while (isDigit(codePoints[c])) { - exponent.push(codePoints[c++]); - } - var exp = exponent.length ? parseInt(fromCodePoint$1.apply(void 0, exponent), 10) : 0; - return sign * (int + frac * Math.pow(10, -fracd)) * Math.pow(10, expsign * exp); - }; - var LEFT_PARENTHESIS_TOKEN = { - type: 2 /* LEFT_PARENTHESIS_TOKEN */ - }; - var RIGHT_PARENTHESIS_TOKEN = { - type: 3 /* RIGHT_PARENTHESIS_TOKEN */ - }; - var COMMA_TOKEN = { type: 4 /* COMMA_TOKEN */ }; - var SUFFIX_MATCH_TOKEN = { type: 13 /* SUFFIX_MATCH_TOKEN */ }; - var PREFIX_MATCH_TOKEN = { type: 8 /* PREFIX_MATCH_TOKEN */ }; - var COLUMN_TOKEN = { type: 21 /* COLUMN_TOKEN */ }; - var DASH_MATCH_TOKEN = { type: 9 /* DASH_MATCH_TOKEN */ }; - var INCLUDE_MATCH_TOKEN = { type: 10 /* INCLUDE_MATCH_TOKEN */ }; - var LEFT_CURLY_BRACKET_TOKEN = { - type: 11 /* LEFT_CURLY_BRACKET_TOKEN */ - }; - var RIGHT_CURLY_BRACKET_TOKEN = { - type: 12 /* RIGHT_CURLY_BRACKET_TOKEN */ - }; - var SUBSTRING_MATCH_TOKEN = { type: 14 /* SUBSTRING_MATCH_TOKEN */ }; - var BAD_URL_TOKEN = { type: 23 /* BAD_URL_TOKEN */ }; - var BAD_STRING_TOKEN = { type: 1 /* BAD_STRING_TOKEN */ }; - var CDO_TOKEN = { type: 25 /* CDO_TOKEN */ }; - var CDC_TOKEN = { type: 24 /* CDC_TOKEN */ }; - var COLON_TOKEN = { type: 26 /* COLON_TOKEN */ }; - var SEMICOLON_TOKEN = { type: 27 /* SEMICOLON_TOKEN */ }; - var LEFT_SQUARE_BRACKET_TOKEN = { - type: 28 /* LEFT_SQUARE_BRACKET_TOKEN */ - }; - var RIGHT_SQUARE_BRACKET_TOKEN = { - type: 29 /* RIGHT_SQUARE_BRACKET_TOKEN */ - }; - var WHITESPACE_TOKEN = { type: 31 /* WHITESPACE_TOKEN */ }; - var EOF_TOKEN = { type: 32 /* EOF_TOKEN */ }; - var Tokenizer = /** @class */ (function () { - function Tokenizer() { - 
this._value = []; - } - Tokenizer.prototype.write = function (chunk) { - this._value = this._value.concat(toCodePoints$1(chunk)); - }; - Tokenizer.prototype.read = function () { - var tokens = []; - var token = this.consumeToken(); - while (token !== EOF_TOKEN) { - tokens.push(token); - token = this.consumeToken(); - } - return tokens; - }; - Tokenizer.prototype.consumeToken = function () { - var codePoint = this.consumeCodePoint(); - switch (codePoint) { - case QUOTATION_MARK: - return this.consumeStringToken(QUOTATION_MARK); - case NUMBER_SIGN: - var c1 = this.peekCodePoint(0); - var c2 = this.peekCodePoint(1); - var c3 = this.peekCodePoint(2); - if (isNameCodePoint(c1) || isValidEscape(c2, c3)) { - var flags = isIdentifierStart(c1, c2, c3) ? FLAG_ID : FLAG_UNRESTRICTED; - var value = this.consumeName(); - return { type: 5 /* HASH_TOKEN */, value: value, flags: flags }; - } - break; - case DOLLAR_SIGN: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return SUFFIX_MATCH_TOKEN; - } - break; - case APOSTROPHE: - return this.consumeStringToken(APOSTROPHE); - case LEFT_PARENTHESIS: - return LEFT_PARENTHESIS_TOKEN; - case RIGHT_PARENTHESIS: - return RIGHT_PARENTHESIS_TOKEN; - case ASTERISK: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return SUBSTRING_MATCH_TOKEN; - } - break; - case PLUS_SIGN: - if (isNumberStart(codePoint, this.peekCodePoint(0), this.peekCodePoint(1))) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - break; - case COMMA: - return COMMA_TOKEN; - case HYPHEN_MINUS: - var e1 = codePoint; - var e2 = this.peekCodePoint(0); - var e3 = this.peekCodePoint(1); - if (isNumberStart(e1, e2, e3)) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - if (isIdentifierStart(e1, e2, e3)) { - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - } - if (e2 === HYPHEN_MINUS && e3 === GREATER_THAN_SIGN) { - 
this.consumeCodePoint(); - this.consumeCodePoint(); - return CDC_TOKEN; - } - break; - case FULL_STOP: - if (isNumberStart(codePoint, this.peekCodePoint(0), this.peekCodePoint(1))) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - break; - case SOLIDUS: - if (this.peekCodePoint(0) === ASTERISK) { - this.consumeCodePoint(); - while (true) { - var c = this.consumeCodePoint(); - if (c === ASTERISK) { - c = this.consumeCodePoint(); - if (c === SOLIDUS) { - return this.consumeToken(); - } - } - if (c === EOF) { - return this.consumeToken(); - } - } - } - break; - case COLON: - return COLON_TOKEN; - case SEMICOLON: - return SEMICOLON_TOKEN; - case LESS_THAN_SIGN: - if (this.peekCodePoint(0) === EXCLAMATION_MARK && - this.peekCodePoint(1) === HYPHEN_MINUS && - this.peekCodePoint(2) === HYPHEN_MINUS) { - this.consumeCodePoint(); - this.consumeCodePoint(); - return CDO_TOKEN; - } - break; - case COMMERCIAL_AT: - var a1 = this.peekCodePoint(0); - var a2 = this.peekCodePoint(1); - var a3 = this.peekCodePoint(2); - if (isIdentifierStart(a1, a2, a3)) { - var value = this.consumeName(); - return { type: 7 /* AT_KEYWORD_TOKEN */, value: value }; - } - break; - case LEFT_SQUARE_BRACKET: - return LEFT_SQUARE_BRACKET_TOKEN; - case REVERSE_SOLIDUS: - if (isValidEscape(codePoint, this.peekCodePoint(0))) { - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - } - break; - case RIGHT_SQUARE_BRACKET: - return RIGHT_SQUARE_BRACKET_TOKEN; - case CIRCUMFLEX_ACCENT: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return PREFIX_MATCH_TOKEN; - } - break; - case LEFT_CURLY_BRACKET: - return LEFT_CURLY_BRACKET_TOKEN; - case RIGHT_CURLY_BRACKET: - return RIGHT_CURLY_BRACKET_TOKEN; - case u: - case U: - var u1 = this.peekCodePoint(0); - var u2 = this.peekCodePoint(1); - if (u1 === PLUS_SIGN && (isHex(u2) || u2 === QUESTION_MARK)) { - this.consumeCodePoint(); - this.consumeUnicodeRangeToken(); - } - 
this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - case VERTICAL_LINE: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return DASH_MATCH_TOKEN; - } - if (this.peekCodePoint(0) === VERTICAL_LINE) { - this.consumeCodePoint(); - return COLUMN_TOKEN; - } - break; - case TILDE: - if (this.peekCodePoint(0) === EQUALS_SIGN) { - this.consumeCodePoint(); - return INCLUDE_MATCH_TOKEN; - } - break; - case EOF: - return EOF_TOKEN; - } - if (isWhiteSpace(codePoint)) { - this.consumeWhiteSpace(); - return WHITESPACE_TOKEN; - } - if (isDigit(codePoint)) { - this.reconsumeCodePoint(codePoint); - return this.consumeNumericToken(); - } - if (isNameStartCodePoint(codePoint)) { - this.reconsumeCodePoint(codePoint); - return this.consumeIdentLikeToken(); - } - return { type: 6 /* DELIM_TOKEN */, value: fromCodePoint$1(codePoint) }; - }; - Tokenizer.prototype.consumeCodePoint = function () { - var value = this._value.shift(); - return typeof value === 'undefined' ? -1 : value; - }; - Tokenizer.prototype.reconsumeCodePoint = function (codePoint) { - this._value.unshift(codePoint); - }; - Tokenizer.prototype.peekCodePoint = function (delta) { - if (delta >= this._value.length) { - return -1; - } - return this._value[delta]; - }; - Tokenizer.prototype.consumeUnicodeRangeToken = function () { - var digits = []; - var codePoint = this.consumeCodePoint(); - while (isHex(codePoint) && digits.length < 6) { - digits.push(codePoint); - codePoint = this.consumeCodePoint(); - } - var questionMarks = false; - while (codePoint === QUESTION_MARK && digits.length < 6) { - digits.push(codePoint); - codePoint = this.consumeCodePoint(); - questionMarks = true; - } - if (questionMarks) { - var start_1 = parseInt(fromCodePoint$1.apply(void 0, digits.map(function (digit) { return (digit === QUESTION_MARK ? ZERO : digit); })), 16); - var end = parseInt(fromCodePoint$1.apply(void 0, digits.map(function (digit) { return (digit === QUESTION_MARK ? 
F : digit); })), 16); - return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start_1, end: end }; - } - var start = parseInt(fromCodePoint$1.apply(void 0, digits), 16); - if (this.peekCodePoint(0) === HYPHEN_MINUS && isHex(this.peekCodePoint(1))) { - this.consumeCodePoint(); - codePoint = this.consumeCodePoint(); - var endDigits = []; - while (isHex(codePoint) && endDigits.length < 6) { - endDigits.push(codePoint); - codePoint = this.consumeCodePoint(); - } - var end = parseInt(fromCodePoint$1.apply(void 0, endDigits), 16); - return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start, end: end }; - } - else { - return { type: 30 /* UNICODE_RANGE_TOKEN */, start: start, end: start }; - } - }; - Tokenizer.prototype.consumeIdentLikeToken = function () { - var value = this.consumeName(); - if (value.toLowerCase() === 'url' && this.peekCodePoint(0) === LEFT_PARENTHESIS) { - this.consumeCodePoint(); - return this.consumeUrlToken(); - } - else if (this.peekCodePoint(0) === LEFT_PARENTHESIS) { - this.consumeCodePoint(); - return { type: 19 /* FUNCTION_TOKEN */, value: value }; - } - return { type: 20 /* IDENT_TOKEN */, value: value }; - }; - Tokenizer.prototype.consumeUrlToken = function () { - var value = []; - this.consumeWhiteSpace(); - if (this.peekCodePoint(0) === EOF) { - return { type: 22 /* URL_TOKEN */, value: '' }; - } - var next = this.peekCodePoint(0); - if (next === APOSTROPHE || next === QUOTATION_MARK) { - var stringToken = this.consumeStringToken(this.consumeCodePoint()); - if (stringToken.type === 0 /* STRING_TOKEN */) { - this.consumeWhiteSpace(); - if (this.peekCodePoint(0) === EOF || this.peekCodePoint(0) === RIGHT_PARENTHESIS) { - this.consumeCodePoint(); - return { type: 22 /* URL_TOKEN */, value: stringToken.value }; - } - } - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - while (true) { - var codePoint = this.consumeCodePoint(); - if (codePoint === EOF || codePoint === RIGHT_PARENTHESIS) { - return { type: 22 /* URL_TOKEN */, value: 
fromCodePoint$1.apply(void 0, value) }; - } - else if (isWhiteSpace(codePoint)) { - this.consumeWhiteSpace(); - if (this.peekCodePoint(0) === EOF || this.peekCodePoint(0) === RIGHT_PARENTHESIS) { - this.consumeCodePoint(); - return { type: 22 /* URL_TOKEN */, value: fromCodePoint$1.apply(void 0, value) }; - } - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - else if (codePoint === QUOTATION_MARK || - codePoint === APOSTROPHE || - codePoint === LEFT_PARENTHESIS || - isNonPrintableCodePoint(codePoint)) { - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - else if (codePoint === REVERSE_SOLIDUS) { - if (isValidEscape(codePoint, this.peekCodePoint(0))) { - value.push(this.consumeEscapedCodePoint()); - } - else { - this.consumeBadUrlRemnants(); - return BAD_URL_TOKEN; - } - } - else { - value.push(codePoint); - } - } - }; - Tokenizer.prototype.consumeWhiteSpace = function () { - while (isWhiteSpace(this.peekCodePoint(0))) { - this.consumeCodePoint(); - } - }; - Tokenizer.prototype.consumeBadUrlRemnants = function () { - while (true) { - var codePoint = this.consumeCodePoint(); - if (codePoint === RIGHT_PARENTHESIS || codePoint === EOF) { - return; - } - if (isValidEscape(codePoint, this.peekCodePoint(0))) { - this.consumeEscapedCodePoint(); - } - } - }; - Tokenizer.prototype.consumeStringSlice = function (count) { - var SLICE_STACK_SIZE = 50000; - var value = ''; - while (count > 0) { - var amount = Math.min(SLICE_STACK_SIZE, count); - value += fromCodePoint$1.apply(void 0, this._value.splice(0, amount)); - count -= amount; - } - this._value.shift(); - return value; - }; - Tokenizer.prototype.consumeStringToken = function (endingCodePoint) { - var value = ''; - var i = 0; - do { - var codePoint = this._value[i]; - if (codePoint === EOF || codePoint === undefined || codePoint === endingCodePoint) { - value += this.consumeStringSlice(i); - return { type: 0 /* STRING_TOKEN */, value: value }; - } - if (codePoint === LINE_FEED) { - 
this._value.splice(0, i); - return BAD_STRING_TOKEN; - } - if (codePoint === REVERSE_SOLIDUS) { - var next = this._value[i + 1]; - if (next !== EOF && next !== undefined) { - if (next === LINE_FEED) { - value += this.consumeStringSlice(i); - i = -1; - this._value.shift(); - } - else if (isValidEscape(codePoint, next)) { - value += this.consumeStringSlice(i); - value += fromCodePoint$1(this.consumeEscapedCodePoint()); - i = -1; - } - } - } - i++; - } while (true); - }; - Tokenizer.prototype.consumeNumber = function () { - var repr = []; - var type = FLAG_INTEGER; - var c1 = this.peekCodePoint(0); - if (c1 === PLUS_SIGN || c1 === HYPHEN_MINUS) { - repr.push(this.consumeCodePoint()); - } - while (isDigit(this.peekCodePoint(0))) { - repr.push(this.consumeCodePoint()); - } - c1 = this.peekCodePoint(0); - var c2 = this.peekCodePoint(1); - if (c1 === FULL_STOP && isDigit(c2)) { - repr.push(this.consumeCodePoint(), this.consumeCodePoint()); - type = FLAG_NUMBER; - while (isDigit(this.peekCodePoint(0))) { - repr.push(this.consumeCodePoint()); - } - } - c1 = this.peekCodePoint(0); - c2 = this.peekCodePoint(1); - var c3 = this.peekCodePoint(2); - if ((c1 === E || c1 === e) && (((c2 === PLUS_SIGN || c2 === HYPHEN_MINUS) && isDigit(c3)) || isDigit(c2))) { - repr.push(this.consumeCodePoint(), this.consumeCodePoint()); - type = FLAG_NUMBER; - while (isDigit(this.peekCodePoint(0))) { - repr.push(this.consumeCodePoint()); - } - } - return [stringToNumber(repr), type]; - }; - Tokenizer.prototype.consumeNumericToken = function () { - var _a = this.consumeNumber(), number = _a[0], flags = _a[1]; - var c1 = this.peekCodePoint(0); - var c2 = this.peekCodePoint(1); - var c3 = this.peekCodePoint(2); - if (isIdentifierStart(c1, c2, c3)) { - var unit = this.consumeName(); - return { type: 15 /* DIMENSION_TOKEN */, number: number, flags: flags, unit: unit }; - } - if (c1 === PERCENTAGE_SIGN) { - this.consumeCodePoint(); - return { type: 16 /* PERCENTAGE_TOKEN */, number: number, flags: flags 
}; - } - return { type: 17 /* NUMBER_TOKEN */, number: number, flags: flags }; - }; - Tokenizer.prototype.consumeEscapedCodePoint = function () { - var codePoint = this.consumeCodePoint(); - if (isHex(codePoint)) { - var hex = fromCodePoint$1(codePoint); - while (isHex(this.peekCodePoint(0)) && hex.length < 6) { - hex += fromCodePoint$1(this.consumeCodePoint()); - } - if (isWhiteSpace(this.peekCodePoint(0))) { - this.consumeCodePoint(); - } - var hexCodePoint = parseInt(hex, 16); - if (hexCodePoint === 0 || isSurrogateCodePoint(hexCodePoint) || hexCodePoint > 0x10ffff) { - return REPLACEMENT_CHARACTER; - } - return hexCodePoint; - } - if (codePoint === EOF) { - return REPLACEMENT_CHARACTER; - } - return codePoint; - }; - Tokenizer.prototype.consumeName = function () { - var result = ''; - while (true) { - var codePoint = this.consumeCodePoint(); - if (isNameCodePoint(codePoint)) { - result += fromCodePoint$1(codePoint); - } - else if (isValidEscape(codePoint, this.peekCodePoint(0))) { - result += fromCodePoint$1(this.consumeEscapedCodePoint()); - } - else { - this.reconsumeCodePoint(codePoint); - return result; - } - } - }; - return Tokenizer; - }()); - - var Parser = /** @class */ (function () { - function Parser(tokens) { - this._tokens = tokens; - } - Parser.create = function (value) { - var tokenizer = new Tokenizer(); - tokenizer.write(value); - return new Parser(tokenizer.read()); - }; - Parser.parseValue = function (value) { - return Parser.create(value).parseComponentValue(); - }; - Parser.parseValues = function (value) { - return Parser.create(value).parseComponentValues(); - }; - Parser.prototype.parseComponentValue = function () { - var token = this.consumeToken(); - while (token.type === 31 /* WHITESPACE_TOKEN */) { - token = this.consumeToken(); - } - if (token.type === 32 /* EOF_TOKEN */) { - throw new SyntaxError("Error parsing CSS component value, unexpected EOF"); - } - this.reconsumeToken(token); - var value = this.consumeComponentValue(); - do { 
- token = this.consumeToken(); - } while (token.type === 31 /* WHITESPACE_TOKEN */); - if (token.type === 32 /* EOF_TOKEN */) { - return value; - } - throw new SyntaxError("Error parsing CSS component value, multiple values found when expecting only one"); - }; - Parser.prototype.parseComponentValues = function () { - var values = []; - while (true) { - var value = this.consumeComponentValue(); - if (value.type === 32 /* EOF_TOKEN */) { - return values; - } - values.push(value); - values.push(); - } - }; - Parser.prototype.consumeComponentValue = function () { - var token = this.consumeToken(); - switch (token.type) { - case 11 /* LEFT_CURLY_BRACKET_TOKEN */: - case 28 /* LEFT_SQUARE_BRACKET_TOKEN */: - case 2 /* LEFT_PARENTHESIS_TOKEN */: - return this.consumeSimpleBlock(token.type); - case 19 /* FUNCTION_TOKEN */: - return this.consumeFunction(token); - } - return token; - }; - Parser.prototype.consumeSimpleBlock = function (type) { - var block = { type: type, values: [] }; - var token = this.consumeToken(); - while (true) { - if (token.type === 32 /* EOF_TOKEN */ || isEndingTokenFor(token, type)) { - return block; - } - this.reconsumeToken(token); - block.values.push(this.consumeComponentValue()); - token = this.consumeToken(); - } - }; - Parser.prototype.consumeFunction = function (functionToken) { - var cssFunction = { - name: functionToken.value, - values: [], - type: 18 /* FUNCTION */ - }; - while (true) { - var token = this.consumeToken(); - if (token.type === 32 /* EOF_TOKEN */ || token.type === 3 /* RIGHT_PARENTHESIS_TOKEN */) { - return cssFunction; - } - this.reconsumeToken(token); - cssFunction.values.push(this.consumeComponentValue()); - } - }; - Parser.prototype.consumeToken = function () { - var token = this._tokens.shift(); - return typeof token === 'undefined' ? 
EOF_TOKEN : token; - }; - Parser.prototype.reconsumeToken = function (token) { - this._tokens.unshift(token); - }; - return Parser; - }()); - var isDimensionToken = function (token) { return token.type === 15 /* DIMENSION_TOKEN */; }; - var isNumberToken = function (token) { return token.type === 17 /* NUMBER_TOKEN */; }; - var isIdentToken = function (token) { return token.type === 20 /* IDENT_TOKEN */; }; - var isStringToken = function (token) { return token.type === 0 /* STRING_TOKEN */; }; - var isIdentWithValue = function (token, value) { - return isIdentToken(token) && token.value === value; - }; - var nonWhiteSpace = function (token) { return token.type !== 31 /* WHITESPACE_TOKEN */; }; - var nonFunctionArgSeparator = function (token) { - return token.type !== 31 /* WHITESPACE_TOKEN */ && token.type !== 4 /* COMMA_TOKEN */; - }; - var parseFunctionArgs = function (tokens) { - var args = []; - var arg = []; - tokens.forEach(function (token) { - if (token.type === 4 /* COMMA_TOKEN */) { - if (arg.length === 0) { - throw new Error("Error parsing function args, zero tokens for arg"); - } - args.push(arg); - arg = []; - return; - } - if (token.type !== 31 /* WHITESPACE_TOKEN */) { - arg.push(token); - } - }); - if (arg.length) { - args.push(arg); - } - return args; - }; - var isEndingTokenFor = function (token, type) { - if (type === 11 /* LEFT_CURLY_BRACKET_TOKEN */ && token.type === 12 /* RIGHT_CURLY_BRACKET_TOKEN */) { - return true; - } - if (type === 28 /* LEFT_SQUARE_BRACKET_TOKEN */ && token.type === 29 /* RIGHT_SQUARE_BRACKET_TOKEN */) { - return true; - } - return type === 2 /* LEFT_PARENTHESIS_TOKEN */ && token.type === 3 /* RIGHT_PARENTHESIS_TOKEN */; - }; - - var isLength = function (token) { - return token.type === 17 /* NUMBER_TOKEN */ || token.type === 15 /* DIMENSION_TOKEN */; - }; - - var isLengthPercentage = function (token) { - return token.type === 16 /* PERCENTAGE_TOKEN */ || isLength(token); - }; - var parseLengthPercentageTuple = function 
(tokens) { - return tokens.length > 1 ? [tokens[0], tokens[1]] : [tokens[0]]; - }; - var ZERO_LENGTH = { - type: 17 /* NUMBER_TOKEN */, - number: 0, - flags: FLAG_INTEGER - }; - var FIFTY_PERCENT = { - type: 16 /* PERCENTAGE_TOKEN */, - number: 50, - flags: FLAG_INTEGER - }; - var HUNDRED_PERCENT = { - type: 16 /* PERCENTAGE_TOKEN */, - number: 100, - flags: FLAG_INTEGER - }; - var getAbsoluteValueForTuple = function (tuple, width, height) { - var x = tuple[0], y = tuple[1]; - return [getAbsoluteValue(x, width), getAbsoluteValue(typeof y !== 'undefined' ? y : x, height)]; - }; - var getAbsoluteValue = function (token, parent) { - if (token.type === 16 /* PERCENTAGE_TOKEN */) { - return (token.number / 100) * parent; - } - if (isDimensionToken(token)) { - switch (token.unit) { - case 'rem': - case 'em': - return 16 * token.number; // TODO use correct font-size - case 'px': - default: - return token.number; - } - } - return token.number; - }; - - var DEG = 'deg'; - var GRAD = 'grad'; - var RAD = 'rad'; - var TURN = 'turn'; - var angle = { - name: 'angle', - parse: function (_context, value) { - if (value.type === 15 /* DIMENSION_TOKEN */) { - switch (value.unit) { - case DEG: - return (Math.PI * value.number) / 180; - case GRAD: - return (Math.PI / 200) * value.number; - case RAD: - return value.number; - case TURN: - return Math.PI * 2 * value.number; - } - } - throw new Error("Unsupported angle type"); - } - }; - var isAngle = function (value) { - if (value.type === 15 /* DIMENSION_TOKEN */) { - if (value.unit === DEG || value.unit === GRAD || value.unit === RAD || value.unit === TURN) { - return true; - } - } - return false; - }; - var parseNamedSide = function (tokens) { - var sideOrCorner = tokens - .filter(isIdentToken) - .map(function (ident) { return ident.value; }) - .join(' '); - switch (sideOrCorner) { - case 'to bottom right': - case 'to right bottom': - case 'left top': - case 'top left': - return [ZERO_LENGTH, ZERO_LENGTH]; - case 'to top': - case 
'bottom': - return deg(0); - case 'to bottom left': - case 'to left bottom': - case 'right top': - case 'top right': - return [ZERO_LENGTH, HUNDRED_PERCENT]; - case 'to right': - case 'left': - return deg(90); - case 'to top left': - case 'to left top': - case 'right bottom': - case 'bottom right': - return [HUNDRED_PERCENT, HUNDRED_PERCENT]; - case 'to bottom': - case 'top': - return deg(180); - case 'to top right': - case 'to right top': - case 'left bottom': - case 'bottom left': - return [HUNDRED_PERCENT, ZERO_LENGTH]; - case 'to left': - case 'right': - return deg(270); - } - return 0; - }; - var deg = function (deg) { return (Math.PI * deg) / 180; }; - - var color$1 = { - name: 'color', - parse: function (context, value) { - if (value.type === 18 /* FUNCTION */) { - var colorFunction = SUPPORTED_COLOR_FUNCTIONS[value.name]; - if (typeof colorFunction === 'undefined') { - throw new Error("Attempting to parse an unsupported color function \"" + value.name + "\""); - } - return colorFunction(context, value.values); - } - if (value.type === 5 /* HASH_TOKEN */) { - if (value.value.length === 3) { - var r = value.value.substring(0, 1); - var g = value.value.substring(1, 2); - var b = value.value.substring(2, 3); - return pack(parseInt(r + r, 16), parseInt(g + g, 16), parseInt(b + b, 16), 1); - } - if (value.value.length === 4) { - var r = value.value.substring(0, 1); - var g = value.value.substring(1, 2); - var b = value.value.substring(2, 3); - var a = value.value.substring(3, 4); - return pack(parseInt(r + r, 16), parseInt(g + g, 16), parseInt(b + b, 16), parseInt(a + a, 16) / 255); - } - if (value.value.length === 6) { - var r = value.value.substring(0, 2); - var g = value.value.substring(2, 4); - var b = value.value.substring(4, 6); - return pack(parseInt(r, 16), parseInt(g, 16), parseInt(b, 16), 1); - } - if (value.value.length === 8) { - var r = value.value.substring(0, 2); - var g = value.value.substring(2, 4); - var b = value.value.substring(4, 6); - var a 
= value.value.substring(6, 8); - return pack(parseInt(r, 16), parseInt(g, 16), parseInt(b, 16), parseInt(a, 16) / 255); - } - } - if (value.type === 20 /* IDENT_TOKEN */) { - var namedColor = COLORS[value.value.toUpperCase()]; - if (typeof namedColor !== 'undefined') { - return namedColor; - } - } - return COLORS.TRANSPARENT; - } - }; - var isTransparent = function (color) { return (0xff & color) === 0; }; - var asString = function (color) { - var alpha = 0xff & color; - var blue = 0xff & (color >> 8); - var green = 0xff & (color >> 16); - var red = 0xff & (color >> 24); - return alpha < 255 ? "rgba(" + red + "," + green + "," + blue + "," + alpha / 255 + ")" : "rgb(" + red + "," + green + "," + blue + ")"; - }; - var pack = function (r, g, b, a) { - return ((r << 24) | (g << 16) | (b << 8) | (Math.round(a * 255) << 0)) >>> 0; - }; - var getTokenColorValue = function (token, i) { - if (token.type === 17 /* NUMBER_TOKEN */) { - return token.number; - } - if (token.type === 16 /* PERCENTAGE_TOKEN */) { - var max = i === 3 ? 1 : 255; - return i === 3 ? 
(token.number / 100) * max : Math.round((token.number / 100) * max); - } - return 0; - }; - var rgb = function (_context, args) { - var tokens = args.filter(nonFunctionArgSeparator); - if (tokens.length === 3) { - var _a = tokens.map(getTokenColorValue), r = _a[0], g = _a[1], b = _a[2]; - return pack(r, g, b, 1); - } - if (tokens.length === 4) { - var _b = tokens.map(getTokenColorValue), r = _b[0], g = _b[1], b = _b[2], a = _b[3]; - return pack(r, g, b, a); - } - return 0; - }; - function hue2rgb(t1, t2, hue) { - if (hue < 0) { - hue += 1; - } - if (hue >= 1) { - hue -= 1; - } - if (hue < 1 / 6) { - return (t2 - t1) * hue * 6 + t1; - } - else if (hue < 1 / 2) { - return t2; - } - else if (hue < 2 / 3) { - return (t2 - t1) * 6 * (2 / 3 - hue) + t1; - } - else { - return t1; - } - } - var hsl = function (context, args) { - var tokens = args.filter(nonFunctionArgSeparator); - var hue = tokens[0], saturation = tokens[1], lightness = tokens[2], alpha = tokens[3]; - var h = (hue.type === 17 /* NUMBER_TOKEN */ ? deg(hue.number) : angle.parse(context, hue)) / (Math.PI * 2); - var s = isLengthPercentage(saturation) ? saturation.number / 100 : 0; - var l = isLengthPercentage(lightness) ? lightness.number / 100 : 0; - var a = typeof alpha !== 'undefined' && isLengthPercentage(alpha) ? getAbsoluteValue(alpha, 1) : 1; - if (s === 0) { - return pack(l * 255, l * 255, l * 255, 1); - } - var t2 = l <= 0.5 ? 
l * (s + 1) : l + s - l * s; - var t1 = l * 2 - t2; - var r = hue2rgb(t1, t2, h + 1 / 3); - var g = hue2rgb(t1, t2, h); - var b = hue2rgb(t1, t2, h - 1 / 3); - return pack(r * 255, g * 255, b * 255, a); - }; - var SUPPORTED_COLOR_FUNCTIONS = { - hsl: hsl, - hsla: hsl, - rgb: rgb, - rgba: rgb - }; - var parseColor = function (context, value) { - return color$1.parse(context, Parser.create(value).parseComponentValue()); - }; - var COLORS = { - ALICEBLUE: 0xf0f8ffff, - ANTIQUEWHITE: 0xfaebd7ff, - AQUA: 0x00ffffff, - AQUAMARINE: 0x7fffd4ff, - AZURE: 0xf0ffffff, - BEIGE: 0xf5f5dcff, - BISQUE: 0xffe4c4ff, - BLACK: 0x000000ff, - BLANCHEDALMOND: 0xffebcdff, - BLUE: 0x0000ffff, - BLUEVIOLET: 0x8a2be2ff, - BROWN: 0xa52a2aff, - BURLYWOOD: 0xdeb887ff, - CADETBLUE: 0x5f9ea0ff, - CHARTREUSE: 0x7fff00ff, - CHOCOLATE: 0xd2691eff, - CORAL: 0xff7f50ff, - CORNFLOWERBLUE: 0x6495edff, - CORNSILK: 0xfff8dcff, - CRIMSON: 0xdc143cff, - CYAN: 0x00ffffff, - DARKBLUE: 0x00008bff, - DARKCYAN: 0x008b8bff, - DARKGOLDENROD: 0xb886bbff, - DARKGRAY: 0xa9a9a9ff, - DARKGREEN: 0x006400ff, - DARKGREY: 0xa9a9a9ff, - DARKKHAKI: 0xbdb76bff, - DARKMAGENTA: 0x8b008bff, - DARKOLIVEGREEN: 0x556b2fff, - DARKORANGE: 0xff8c00ff, - DARKORCHID: 0x9932ccff, - DARKRED: 0x8b0000ff, - DARKSALMON: 0xe9967aff, - DARKSEAGREEN: 0x8fbc8fff, - DARKSLATEBLUE: 0x483d8bff, - DARKSLATEGRAY: 0x2f4f4fff, - DARKSLATEGREY: 0x2f4f4fff, - DARKTURQUOISE: 0x00ced1ff, - DARKVIOLET: 0x9400d3ff, - DEEPPINK: 0xff1493ff, - DEEPSKYBLUE: 0x00bfffff, - DIMGRAY: 0x696969ff, - DIMGREY: 0x696969ff, - DODGERBLUE: 0x1e90ffff, - FIREBRICK: 0xb22222ff, - FLORALWHITE: 0xfffaf0ff, - FORESTGREEN: 0x228b22ff, - FUCHSIA: 0xff00ffff, - GAINSBORO: 0xdcdcdcff, - GHOSTWHITE: 0xf8f8ffff, - GOLD: 0xffd700ff, - GOLDENROD: 0xdaa520ff, - GRAY: 0x808080ff, - GREEN: 0x008000ff, - GREENYELLOW: 0xadff2fff, - GREY: 0x808080ff, - HONEYDEW: 0xf0fff0ff, - HOTPINK: 0xff69b4ff, - INDIANRED: 0xcd5c5cff, - INDIGO: 0x4b0082ff, - IVORY: 0xfffff0ff, - KHAKI: 0xf0e68cff, - 
LAVENDER: 0xe6e6faff, - LAVENDERBLUSH: 0xfff0f5ff, - LAWNGREEN: 0x7cfc00ff, - LEMONCHIFFON: 0xfffacdff, - LIGHTBLUE: 0xadd8e6ff, - LIGHTCORAL: 0xf08080ff, - LIGHTCYAN: 0xe0ffffff, - LIGHTGOLDENRODYELLOW: 0xfafad2ff, - LIGHTGRAY: 0xd3d3d3ff, - LIGHTGREEN: 0x90ee90ff, - LIGHTGREY: 0xd3d3d3ff, - LIGHTPINK: 0xffb6c1ff, - LIGHTSALMON: 0xffa07aff, - LIGHTSEAGREEN: 0x20b2aaff, - LIGHTSKYBLUE: 0x87cefaff, - LIGHTSLATEGRAY: 0x778899ff, - LIGHTSLATEGREY: 0x778899ff, - LIGHTSTEELBLUE: 0xb0c4deff, - LIGHTYELLOW: 0xffffe0ff, - LIME: 0x00ff00ff, - LIMEGREEN: 0x32cd32ff, - LINEN: 0xfaf0e6ff, - MAGENTA: 0xff00ffff, - MAROON: 0x800000ff, - MEDIUMAQUAMARINE: 0x66cdaaff, - MEDIUMBLUE: 0x0000cdff, - MEDIUMORCHID: 0xba55d3ff, - MEDIUMPURPLE: 0x9370dbff, - MEDIUMSEAGREEN: 0x3cb371ff, - MEDIUMSLATEBLUE: 0x7b68eeff, - MEDIUMSPRINGGREEN: 0x00fa9aff, - MEDIUMTURQUOISE: 0x48d1ccff, - MEDIUMVIOLETRED: 0xc71585ff, - MIDNIGHTBLUE: 0x191970ff, - MINTCREAM: 0xf5fffaff, - MISTYROSE: 0xffe4e1ff, - MOCCASIN: 0xffe4b5ff, - NAVAJOWHITE: 0xffdeadff, - NAVY: 0x000080ff, - OLDLACE: 0xfdf5e6ff, - OLIVE: 0x808000ff, - OLIVEDRAB: 0x6b8e23ff, - ORANGE: 0xffa500ff, - ORANGERED: 0xff4500ff, - ORCHID: 0xda70d6ff, - PALEGOLDENROD: 0xeee8aaff, - PALEGREEN: 0x98fb98ff, - PALETURQUOISE: 0xafeeeeff, - PALEVIOLETRED: 0xdb7093ff, - PAPAYAWHIP: 0xffefd5ff, - PEACHPUFF: 0xffdab9ff, - PERU: 0xcd853fff, - PINK: 0xffc0cbff, - PLUM: 0xdda0ddff, - POWDERBLUE: 0xb0e0e6ff, - PURPLE: 0x800080ff, - REBECCAPURPLE: 0x663399ff, - RED: 0xff0000ff, - ROSYBROWN: 0xbc8f8fff, - ROYALBLUE: 0x4169e1ff, - SADDLEBROWN: 0x8b4513ff, - SALMON: 0xfa8072ff, - SANDYBROWN: 0xf4a460ff, - SEAGREEN: 0x2e8b57ff, - SEASHELL: 0xfff5eeff, - SIENNA: 0xa0522dff, - SILVER: 0xc0c0c0ff, - SKYBLUE: 0x87ceebff, - SLATEBLUE: 0x6a5acdff, - SLATEGRAY: 0x708090ff, - SLATEGREY: 0x708090ff, - SNOW: 0xfffafaff, - SPRINGGREEN: 0x00ff7fff, - STEELBLUE: 0x4682b4ff, - TAN: 0xd2b48cff, - TEAL: 0x008080ff, - THISTLE: 0xd8bfd8ff, - TOMATO: 0xff6347ff, - TRANSPARENT: 
0x00000000, - TURQUOISE: 0x40e0d0ff, - VIOLET: 0xee82eeff, - WHEAT: 0xf5deb3ff, - WHITE: 0xffffffff, - WHITESMOKE: 0xf5f5f5ff, - YELLOW: 0xffff00ff, - YELLOWGREEN: 0x9acd32ff - }; - - var backgroundClip = { - name: 'background-clip', - initialValue: 'border-box', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.map(function (token) { - if (isIdentToken(token)) { - switch (token.value) { - case 'padding-box': - return 1 /* PADDING_BOX */; - case 'content-box': - return 2 /* CONTENT_BOX */; - } - } - return 0 /* BORDER_BOX */; - }); - } - }; - - var backgroundColor = { - name: "background-color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var parseColorStop = function (context, args) { - var color = color$1.parse(context, args[0]); - var stop = args[1]; - return stop && isLengthPercentage(stop) ? { color: color, stop: stop } : { color: color, stop: null }; - }; - var processColorStops = function (stops, lineLength) { - var first = stops[0]; - var last = stops[stops.length - 1]; - if (first.stop === null) { - first.stop = ZERO_LENGTH; - } - if (last.stop === null) { - last.stop = HUNDRED_PERCENT; - } - var processStops = []; - var previous = 0; - for (var i = 0; i < stops.length; i++) { - var stop_1 = stops[i].stop; - if (stop_1 !== null) { - var absoluteValue = getAbsoluteValue(stop_1, lineLength); - if (absoluteValue > previous) { - processStops.push(absoluteValue); - } - else { - processStops.push(previous); - } - previous = absoluteValue; - } - else { - processStops.push(null); - } - } - var gapBegin = null; - for (var i = 0; i < processStops.length; i++) { - var stop_2 = processStops[i]; - if (stop_2 === null) { - if (gapBegin === null) { - gapBegin = i; - } - } - else if (gapBegin !== null) { - var gapLength = i - gapBegin; - var beforeGap = processStops[gapBegin - 1]; - var gapValue = (stop_2 - beforeGap) / (gapLength + 1); - for (var g = 1; g <= gapLength; 
g++) { - processStops[gapBegin + g - 1] = gapValue * g; - } - gapBegin = null; - } - } - return stops.map(function (_a, i) { - var color = _a.color; - return { color: color, stop: Math.max(Math.min(1, processStops[i] / lineLength), 0) }; - }); - }; - var getAngleFromCorner = function (corner, width, height) { - var centerX = width / 2; - var centerY = height / 2; - var x = getAbsoluteValue(corner[0], width) - centerX; - var y = centerY - getAbsoluteValue(corner[1], height); - return (Math.atan2(y, x) + Math.PI * 2) % (Math.PI * 2); - }; - var calculateGradientDirection = function (angle, width, height) { - var radian = typeof angle === 'number' ? angle : getAngleFromCorner(angle, width, height); - var lineLength = Math.abs(width * Math.sin(radian)) + Math.abs(height * Math.cos(radian)); - var halfWidth = width / 2; - var halfHeight = height / 2; - var halfLineLength = lineLength / 2; - var yDiff = Math.sin(radian - Math.PI / 2) * halfLineLength; - var xDiff = Math.cos(radian - Math.PI / 2) * halfLineLength; - return [lineLength, halfWidth - xDiff, halfWidth + xDiff, halfHeight - yDiff, halfHeight + yDiff]; - }; - var distance = function (a, b) { return Math.sqrt(a * a + b * b); }; - var findCorner = function (width, height, x, y, closest) { - var corners = [ - [0, 0], - [0, height], - [width, 0], - [width, height] - ]; - return corners.reduce(function (stat, corner) { - var cx = corner[0], cy = corner[1]; - var d = distance(x - cx, y - cy); - if (closest ? d < stat.optimumDistance : d > stat.optimumDistance) { - return { - optimumCorner: corner, - optimumDistance: d - }; - } - return stat; - }, { - optimumDistance: closest ? Infinity : -Infinity, - optimumCorner: null - }).optimumCorner; - }; - var calculateRadius = function (gradient, x, y, width, height) { - var rx = 0; - var ry = 0; - switch (gradient.size) { - case 0 /* CLOSEST_SIDE */: - // The ending shape is sized so that that it exactly meets the side of the gradient box closest to the gradient’s center. 
- // If the shape is an ellipse, it exactly meets the closest side in each dimension. - if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.min(Math.abs(x), Math.abs(x - width), Math.abs(y), Math.abs(y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - rx = Math.min(Math.abs(x), Math.abs(x - width)); - ry = Math.min(Math.abs(y), Math.abs(y - height)); - } - break; - case 2 /* CLOSEST_CORNER */: - // The ending shape is sized so that that it passes through the corner of the gradient box closest to the gradient’s center. - // If the shape is an ellipse, the ending shape is given the same aspect-ratio it would have if closest-side were specified. - if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.min(distance(x, y), distance(x, y - height), distance(x - width, y), distance(x - width, y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - // Compute the ratio ry/rx (which is to be the same as for "closest-side") - var c = Math.min(Math.abs(y), Math.abs(y - height)) / Math.min(Math.abs(x), Math.abs(x - width)); - var _a = findCorner(width, height, x, y, true), cx = _a[0], cy = _a[1]; - rx = distance(cx - x, (cy - y) / c); - ry = c * rx; - } - break; - case 1 /* FARTHEST_SIDE */: - // Same as closest-side, except the ending shape is sized based on the farthest side(s) - if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.max(Math.abs(x), Math.abs(x - width), Math.abs(y), Math.abs(y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - rx = Math.max(Math.abs(x), Math.abs(x - width)); - ry = Math.max(Math.abs(y), Math.abs(y - height)); - } - break; - case 3 /* FARTHEST_CORNER */: - // Same as closest-corner, except the ending shape is sized based on the farthest corner. - // If the shape is an ellipse, the ending shape is given the same aspect ratio it would have if farthest-side were specified. 
- if (gradient.shape === 0 /* CIRCLE */) { - rx = ry = Math.max(distance(x, y), distance(x, y - height), distance(x - width, y), distance(x - width, y - height)); - } - else if (gradient.shape === 1 /* ELLIPSE */) { - // Compute the ratio ry/rx (which is to be the same as for "farthest-side") - var c = Math.max(Math.abs(y), Math.abs(y - height)) / Math.max(Math.abs(x), Math.abs(x - width)); - var _b = findCorner(width, height, x, y, false), cx = _b[0], cy = _b[1]; - rx = distance(cx - x, (cy - y) / c); - ry = c * rx; - } - break; - } - if (Array.isArray(gradient.size)) { - rx = getAbsoluteValue(gradient.size[0], width); - ry = gradient.size.length === 2 ? getAbsoluteValue(gradient.size[1], height) : rx; - } - return [rx, ry]; - }; - - var linearGradient = function (context, tokens) { - var angle$1 = deg(180); - var stops = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - if (i === 0) { - var firstToken = arg[0]; - if (firstToken.type === 20 /* IDENT_TOKEN */ && firstToken.value === 'to') { - angle$1 = parseNamedSide(arg); - return; - } - else if (isAngle(firstToken)) { - angle$1 = angle.parse(context, firstToken); - return; - } - } - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - }); - return { angle: angle$1, stops: stops, type: 1 /* LINEAR_GRADIENT */ }; - }; - - var prefixLinearGradient = function (context, tokens) { - var angle$1 = deg(180); - var stops = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - if (i === 0) { - var firstToken = arg[0]; - if (firstToken.type === 20 /* IDENT_TOKEN */ && - ['top', 'left', 'right', 'bottom'].indexOf(firstToken.value) !== -1) { - angle$1 = parseNamedSide(arg); - return; - } - else if (isAngle(firstToken)) { - angle$1 = (angle.parse(context, firstToken) + deg(270)) % deg(360); - return; - } - } - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - }); - return { - angle: angle$1, - stops: stops, - type: 1 /* LINEAR_GRADIENT */ - }; - }; - - var 
webkitGradient = function (context, tokens) { - var angle = deg(180); - var stops = []; - var type = 1 /* LINEAR_GRADIENT */; - var shape = 0 /* CIRCLE */; - var size = 3 /* FARTHEST_CORNER */; - var position = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - var firstToken = arg[0]; - if (i === 0) { - if (isIdentToken(firstToken) && firstToken.value === 'linear') { - type = 1 /* LINEAR_GRADIENT */; - return; - } - else if (isIdentToken(firstToken) && firstToken.value === 'radial') { - type = 2 /* RADIAL_GRADIENT */; - return; - } - } - if (firstToken.type === 18 /* FUNCTION */) { - if (firstToken.name === 'from') { - var color = color$1.parse(context, firstToken.values[0]); - stops.push({ stop: ZERO_LENGTH, color: color }); - } - else if (firstToken.name === 'to') { - var color = color$1.parse(context, firstToken.values[0]); - stops.push({ stop: HUNDRED_PERCENT, color: color }); - } - else if (firstToken.name === 'color-stop') { - var values = firstToken.values.filter(nonFunctionArgSeparator); - if (values.length === 2) { - var color = color$1.parse(context, values[1]); - var stop_1 = values[0]; - if (isNumberToken(stop_1)) { - stops.push({ - stop: { type: 16 /* PERCENTAGE_TOKEN */, number: stop_1.number * 100, flags: stop_1.flags }, - color: color - }); - } - } - } - } - }); - return type === 1 /* LINEAR_GRADIENT */ - ? 
{ - angle: (angle + deg(180)) % deg(360), - stops: stops, - type: type - } - : { size: size, shape: shape, stops: stops, position: position, type: type }; - }; - - var CLOSEST_SIDE = 'closest-side'; - var FARTHEST_SIDE = 'farthest-side'; - var CLOSEST_CORNER = 'closest-corner'; - var FARTHEST_CORNER = 'farthest-corner'; - var CIRCLE = 'circle'; - var ELLIPSE = 'ellipse'; - var COVER = 'cover'; - var CONTAIN = 'contain'; - var radialGradient = function (context, tokens) { - var shape = 0 /* CIRCLE */; - var size = 3 /* FARTHEST_CORNER */; - var stops = []; - var position = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - var isColorStop = true; - if (i === 0) { - var isAtPosition_1 = false; - isColorStop = arg.reduce(function (acc, token) { - if (isAtPosition_1) { - if (isIdentToken(token)) { - switch (token.value) { - case 'center': - position.push(FIFTY_PERCENT); - return acc; - case 'top': - case 'left': - position.push(ZERO_LENGTH); - return acc; - case 'right': - case 'bottom': - position.push(HUNDRED_PERCENT); - return acc; - } - } - else if (isLengthPercentage(token) || isLength(token)) { - position.push(token); - } - } - else if (isIdentToken(token)) { - switch (token.value) { - case CIRCLE: - shape = 0 /* CIRCLE */; - return false; - case ELLIPSE: - shape = 1 /* ELLIPSE */; - return false; - case 'at': - isAtPosition_1 = true; - return false; - case CLOSEST_SIDE: - size = 0 /* CLOSEST_SIDE */; - return false; - case COVER: - case FARTHEST_SIDE: - size = 1 /* FARTHEST_SIDE */; - return false; - case CONTAIN: - case CLOSEST_CORNER: - size = 2 /* CLOSEST_CORNER */; - return false; - case FARTHEST_CORNER: - size = 3 /* FARTHEST_CORNER */; - return false; - } - } - else if (isLength(token) || isLengthPercentage(token)) { - if (!Array.isArray(size)) { - size = []; - } - size.push(token); - return false; - } - return acc; - }, isColorStop); - } - if (isColorStop) { - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - } - }); - 
return { size: size, shape: shape, stops: stops, position: position, type: 2 /* RADIAL_GRADIENT */ }; - }; - - var prefixRadialGradient = function (context, tokens) { - var shape = 0 /* CIRCLE */; - var size = 3 /* FARTHEST_CORNER */; - var stops = []; - var position = []; - parseFunctionArgs(tokens).forEach(function (arg, i) { - var isColorStop = true; - if (i === 0) { - isColorStop = arg.reduce(function (acc, token) { - if (isIdentToken(token)) { - switch (token.value) { - case 'center': - position.push(FIFTY_PERCENT); - return false; - case 'top': - case 'left': - position.push(ZERO_LENGTH); - return false; - case 'right': - case 'bottom': - position.push(HUNDRED_PERCENT); - return false; - } - } - else if (isLengthPercentage(token) || isLength(token)) { - position.push(token); - return false; - } - return acc; - }, isColorStop); - } - else if (i === 1) { - isColorStop = arg.reduce(function (acc, token) { - if (isIdentToken(token)) { - switch (token.value) { - case CIRCLE: - shape = 0 /* CIRCLE */; - return false; - case ELLIPSE: - shape = 1 /* ELLIPSE */; - return false; - case CONTAIN: - case CLOSEST_SIDE: - size = 0 /* CLOSEST_SIDE */; - return false; - case FARTHEST_SIDE: - size = 1 /* FARTHEST_SIDE */; - return false; - case CLOSEST_CORNER: - size = 2 /* CLOSEST_CORNER */; - return false; - case COVER: - case FARTHEST_CORNER: - size = 3 /* FARTHEST_CORNER */; - return false; - } - } - else if (isLength(token) || isLengthPercentage(token)) { - if (!Array.isArray(size)) { - size = []; - } - size.push(token); - return false; - } - return acc; - }, isColorStop); - } - if (isColorStop) { - var colorStop = parseColorStop(context, arg); - stops.push(colorStop); - } - }); - return { size: size, shape: shape, stops: stops, position: position, type: 2 /* RADIAL_GRADIENT */ }; - }; - - var isLinearGradient = function (background) { - return background.type === 1 /* LINEAR_GRADIENT */; - }; - var isRadialGradient = function (background) { - return background.type === 2 
/* RADIAL_GRADIENT */; - }; - var image = { - name: 'image', - parse: function (context, value) { - if (value.type === 22 /* URL_TOKEN */) { - var image_1 = { url: value.value, type: 0 /* URL */ }; - context.cache.addImage(value.value); - return image_1; - } - if (value.type === 18 /* FUNCTION */) { - var imageFunction = SUPPORTED_IMAGE_FUNCTIONS[value.name]; - if (typeof imageFunction === 'undefined') { - throw new Error("Attempting to parse an unsupported image function \"" + value.name + "\""); - } - return imageFunction(context, value.values); - } - throw new Error("Unsupported image type " + value.type); - } - }; - function isSupportedImage(value) { - return (!(value.type === 20 /* IDENT_TOKEN */ && value.value === 'none') && - (value.type !== 18 /* FUNCTION */ || !!SUPPORTED_IMAGE_FUNCTIONS[value.name])); - } - var SUPPORTED_IMAGE_FUNCTIONS = { - 'linear-gradient': linearGradient, - '-moz-linear-gradient': prefixLinearGradient, - '-ms-linear-gradient': prefixLinearGradient, - '-o-linear-gradient': prefixLinearGradient, - '-webkit-linear-gradient': prefixLinearGradient, - 'radial-gradient': radialGradient, - '-moz-radial-gradient': prefixRadialGradient, - '-ms-radial-gradient': prefixRadialGradient, - '-o-radial-gradient': prefixRadialGradient, - '-webkit-radial-gradient': prefixRadialGradient, - '-webkit-gradient': webkitGradient - }; - - var backgroundImage = { - name: 'background-image', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (context, tokens) { - if (tokens.length === 0) { - return []; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return []; - } - return tokens - .filter(function (value) { return nonFunctionArgSeparator(value) && isSupportedImage(value); }) - .map(function (value) { return image.parse(context, value); }); - } - }; - - var backgroundOrigin = { - name: 'background-origin', - initialValue: 'border-box', - prefix: false, - type: 1 /* LIST */, - 
parse: function (_context, tokens) { - return tokens.map(function (token) { - if (isIdentToken(token)) { - switch (token.value) { - case 'padding-box': - return 1 /* PADDING_BOX */; - case 'content-box': - return 2 /* CONTENT_BOX */; - } - } - return 0 /* BORDER_BOX */; - }); - } - }; - - var backgroundPosition = { - name: 'background-position', - initialValue: '0% 0%', - type: 1 /* LIST */, - prefix: false, - parse: function (_context, tokens) { - return parseFunctionArgs(tokens) - .map(function (values) { return values.filter(isLengthPercentage); }) - .map(parseLengthPercentageTuple); - } - }; - - var backgroundRepeat = { - name: 'background-repeat', - initialValue: 'repeat', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return parseFunctionArgs(tokens) - .map(function (values) { - return values - .filter(isIdentToken) - .map(function (token) { return token.value; }) - .join(' '); - }) - .map(parseBackgroundRepeat); - } - }; - var parseBackgroundRepeat = function (value) { - switch (value) { - case 'no-repeat': - return 1 /* NO_REPEAT */; - case 'repeat-x': - case 'repeat no-repeat': - return 2 /* REPEAT_X */; - case 'repeat-y': - case 'no-repeat repeat': - return 3 /* REPEAT_Y */; - case 'repeat': - default: - return 0 /* REPEAT */; - } - }; - - var BACKGROUND_SIZE; - (function (BACKGROUND_SIZE) { - BACKGROUND_SIZE["AUTO"] = "auto"; - BACKGROUND_SIZE["CONTAIN"] = "contain"; - BACKGROUND_SIZE["COVER"] = "cover"; - })(BACKGROUND_SIZE || (BACKGROUND_SIZE = {})); - var backgroundSize = { - name: 'background-size', - initialValue: '0', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return parseFunctionArgs(tokens).map(function (values) { return values.filter(isBackgroundSizeInfoToken); }); - } - }; - var isBackgroundSizeInfoToken = function (value) { - return isIdentToken(value) || isLengthPercentage(value); - }; - - var borderColorForSide = function (side) { return ({ - name: "border-" + side + 
"-color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }); }; - var borderTopColor = borderColorForSide('top'); - var borderRightColor = borderColorForSide('right'); - var borderBottomColor = borderColorForSide('bottom'); - var borderLeftColor = borderColorForSide('left'); - - var borderRadiusForSide = function (side) { return ({ - name: "border-radius-" + side, - initialValue: '0 0', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return parseLengthPercentageTuple(tokens.filter(isLengthPercentage)); - } - }); }; - var borderTopLeftRadius = borderRadiusForSide('top-left'); - var borderTopRightRadius = borderRadiusForSide('top-right'); - var borderBottomRightRadius = borderRadiusForSide('bottom-right'); - var borderBottomLeftRadius = borderRadiusForSide('bottom-left'); - - var borderStyleForSide = function (side) { return ({ - name: "border-" + side + "-style", - initialValue: 'solid', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, style) { - switch (style) { - case 'none': - return 0 /* NONE */; - case 'dashed': - return 2 /* DASHED */; - case 'dotted': - return 3 /* DOTTED */; - case 'double': - return 4 /* DOUBLE */; - } - return 1 /* SOLID */; - } - }); }; - var borderTopStyle = borderStyleForSide('top'); - var borderRightStyle = borderStyleForSide('right'); - var borderBottomStyle = borderStyleForSide('bottom'); - var borderLeftStyle = borderStyleForSide('left'); - - var borderWidthForSide = function (side) { return ({ - name: "border-" + side + "-width", - initialValue: '0', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isDimensionToken(token)) { - return token.number; - } - return 0; - } - }); }; - var borderTopWidth = borderWidthForSide('top'); - var borderRightWidth = borderWidthForSide('right'); - var borderBottomWidth = borderWidthForSide('bottom'); - var borderLeftWidth = borderWidthForSide('left'); - - var 
color = { - name: "color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var direction = { - name: 'direction', - initialValue: 'ltr', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, direction) { - switch (direction) { - case 'rtl': - return 1 /* RTL */; - case 'ltr': - default: - return 0 /* LTR */; - } - } - }; - - var display = { - name: 'display', - initialValue: 'inline-block', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.filter(isIdentToken).reduce(function (bit, token) { - return bit | parseDisplayValue(token.value); - }, 0 /* NONE */); - } - }; - var parseDisplayValue = function (display) { - switch (display) { - case 'block': - case '-webkit-box': - return 2 /* BLOCK */; - case 'inline': - return 4 /* INLINE */; - case 'run-in': - return 8 /* RUN_IN */; - case 'flow': - return 16 /* FLOW */; - case 'flow-root': - return 32 /* FLOW_ROOT */; - case 'table': - return 64 /* TABLE */; - case 'flex': - case '-webkit-flex': - return 128 /* FLEX */; - case 'grid': - case '-ms-grid': - return 256 /* GRID */; - case 'ruby': - return 512 /* RUBY */; - case 'subgrid': - return 1024 /* SUBGRID */; - case 'list-item': - return 2048 /* LIST_ITEM */; - case 'table-row-group': - return 4096 /* TABLE_ROW_GROUP */; - case 'table-header-group': - return 8192 /* TABLE_HEADER_GROUP */; - case 'table-footer-group': - return 16384 /* TABLE_FOOTER_GROUP */; - case 'table-row': - return 32768 /* TABLE_ROW */; - case 'table-cell': - return 65536 /* TABLE_CELL */; - case 'table-column-group': - return 131072 /* TABLE_COLUMN_GROUP */; - case 'table-column': - return 262144 /* TABLE_COLUMN */; - case 'table-caption': - return 524288 /* TABLE_CAPTION */; - case 'ruby-base': - return 1048576 /* RUBY_BASE */; - case 'ruby-text': - return 2097152 /* RUBY_TEXT */; - case 'ruby-base-container': - return 4194304 /* RUBY_BASE_CONTAINER */; - case 
'ruby-text-container': - return 8388608 /* RUBY_TEXT_CONTAINER */; - case 'contents': - return 16777216 /* CONTENTS */; - case 'inline-block': - return 33554432 /* INLINE_BLOCK */; - case 'inline-list-item': - return 67108864 /* INLINE_LIST_ITEM */; - case 'inline-table': - return 134217728 /* INLINE_TABLE */; - case 'inline-flex': - return 268435456 /* INLINE_FLEX */; - case 'inline-grid': - return 536870912 /* INLINE_GRID */; - } - return 0 /* NONE */; - }; - - var float = { - name: 'float', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, float) { - switch (float) { - case 'left': - return 1 /* LEFT */; - case 'right': - return 2 /* RIGHT */; - case 'inline-start': - return 3 /* INLINE_START */; - case 'inline-end': - return 4 /* INLINE_END */; - } - return 0 /* NONE */; - } - }; - - var letterSpacing = { - name: 'letter-spacing', - initialValue: '0', - prefix: false, - type: 0 /* VALUE */, - parse: function (_context, token) { - if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'normal') { - return 0; - } - if (token.type === 17 /* NUMBER_TOKEN */) { - return token.number; - } - if (token.type === 15 /* DIMENSION_TOKEN */) { - return token.number; - } - return 0; - } - }; - - var LINE_BREAK; - (function (LINE_BREAK) { - LINE_BREAK["NORMAL"] = "normal"; - LINE_BREAK["STRICT"] = "strict"; - })(LINE_BREAK || (LINE_BREAK = {})); - var lineBreak = { - name: 'line-break', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, lineBreak) { - switch (lineBreak) { - case 'strict': - return LINE_BREAK.STRICT; - case 'normal': - default: - return LINE_BREAK.NORMAL; - } - } - }; - - var lineHeight = { - name: 'line-height', - initialValue: 'normal', - prefix: false, - type: 4 /* TOKEN_VALUE */ - }; - var computeLineHeight = function (token, fontSize) { - if (isIdentToken(token) && token.value === 'normal') { - return 1.2 * fontSize; - } - else if (token.type === 17 /* 
NUMBER_TOKEN */) { - return fontSize * token.number; - } - else if (isLengthPercentage(token)) { - return getAbsoluteValue(token, fontSize); - } - return fontSize; - }; - - var listStyleImage = { - name: 'list-style-image', - initialValue: 'none', - type: 0 /* VALUE */, - prefix: false, - parse: function (context, token) { - if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'none') { - return null; - } - return image.parse(context, token); - } - }; - - var listStylePosition = { - name: 'list-style-position', - initialValue: 'outside', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, position) { - switch (position) { - case 'inside': - return 0 /* INSIDE */; - case 'outside': - default: - return 1 /* OUTSIDE */; - } - } - }; - - var listStyleType = { - name: 'list-style-type', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, type) { - switch (type) { - case 'disc': - return 0 /* DISC */; - case 'circle': - return 1 /* CIRCLE */; - case 'square': - return 2 /* SQUARE */; - case 'decimal': - return 3 /* DECIMAL */; - case 'cjk-decimal': - return 4 /* CJK_DECIMAL */; - case 'decimal-leading-zero': - return 5 /* DECIMAL_LEADING_ZERO */; - case 'lower-roman': - return 6 /* LOWER_ROMAN */; - case 'upper-roman': - return 7 /* UPPER_ROMAN */; - case 'lower-greek': - return 8 /* LOWER_GREEK */; - case 'lower-alpha': - return 9 /* LOWER_ALPHA */; - case 'upper-alpha': - return 10 /* UPPER_ALPHA */; - case 'arabic-indic': - return 11 /* ARABIC_INDIC */; - case 'armenian': - return 12 /* ARMENIAN */; - case 'bengali': - return 13 /* BENGALI */; - case 'cambodian': - return 14 /* CAMBODIAN */; - case 'cjk-earthly-branch': - return 15 /* CJK_EARTHLY_BRANCH */; - case 'cjk-heavenly-stem': - return 16 /* CJK_HEAVENLY_STEM */; - case 'cjk-ideographic': - return 17 /* CJK_IDEOGRAPHIC */; - case 'devanagari': - return 18 /* DEVANAGARI */; - case 'ethiopic-numeric': - return 19 /* ETHIOPIC_NUMERIC */; 
- case 'georgian': - return 20 /* GEORGIAN */; - case 'gujarati': - return 21 /* GUJARATI */; - case 'gurmukhi': - return 22 /* GURMUKHI */; - case 'hebrew': - return 22 /* HEBREW */; - case 'hiragana': - return 23 /* HIRAGANA */; - case 'hiragana-iroha': - return 24 /* HIRAGANA_IROHA */; - case 'japanese-formal': - return 25 /* JAPANESE_FORMAL */; - case 'japanese-informal': - return 26 /* JAPANESE_INFORMAL */; - case 'kannada': - return 27 /* KANNADA */; - case 'katakana': - return 28 /* KATAKANA */; - case 'katakana-iroha': - return 29 /* KATAKANA_IROHA */; - case 'khmer': - return 30 /* KHMER */; - case 'korean-hangul-formal': - return 31 /* KOREAN_HANGUL_FORMAL */; - case 'korean-hanja-formal': - return 32 /* KOREAN_HANJA_FORMAL */; - case 'korean-hanja-informal': - return 33 /* KOREAN_HANJA_INFORMAL */; - case 'lao': - return 34 /* LAO */; - case 'lower-armenian': - return 35 /* LOWER_ARMENIAN */; - case 'malayalam': - return 36 /* MALAYALAM */; - case 'mongolian': - return 37 /* MONGOLIAN */; - case 'myanmar': - return 38 /* MYANMAR */; - case 'oriya': - return 39 /* ORIYA */; - case 'persian': - return 40 /* PERSIAN */; - case 'simp-chinese-formal': - return 41 /* SIMP_CHINESE_FORMAL */; - case 'simp-chinese-informal': - return 42 /* SIMP_CHINESE_INFORMAL */; - case 'tamil': - return 43 /* TAMIL */; - case 'telugu': - return 44 /* TELUGU */; - case 'thai': - return 45 /* THAI */; - case 'tibetan': - return 46 /* TIBETAN */; - case 'trad-chinese-formal': - return 47 /* TRAD_CHINESE_FORMAL */; - case 'trad-chinese-informal': - return 48 /* TRAD_CHINESE_INFORMAL */; - case 'upper-armenian': - return 49 /* UPPER_ARMENIAN */; - case 'disclosure-open': - return 50 /* DISCLOSURE_OPEN */; - case 'disclosure-closed': - return 51 /* DISCLOSURE_CLOSED */; - case 'none': - default: - return -1 /* NONE */; - } - } - }; - - var marginForSide = function (side) { return ({ - name: "margin-" + side, - initialValue: '0', - prefix: false, - type: 4 /* TOKEN_VALUE */ - }); }; 
- var marginTop = marginForSide('top'); - var marginRight = marginForSide('right'); - var marginBottom = marginForSide('bottom'); - var marginLeft = marginForSide('left'); - - var overflow = { - name: 'overflow', - initialValue: 'visible', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens.filter(isIdentToken).map(function (overflow) { - switch (overflow.value) { - case 'hidden': - return 1 /* HIDDEN */; - case 'scroll': - return 2 /* SCROLL */; - case 'clip': - return 3 /* CLIP */; - case 'auto': - return 4 /* AUTO */; - case 'visible': - default: - return 0 /* VISIBLE */; - } - }); - } - }; - - var overflowWrap = { - name: 'overflow-wrap', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, overflow) { - switch (overflow) { - case 'break-word': - return "break-word" /* BREAK_WORD */; - case 'normal': - default: - return "normal" /* NORMAL */; - } - } - }; - - var paddingForSide = function (side) { return ({ - name: "padding-" + side, - initialValue: '0', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'length-percentage' - }); }; - var paddingTop = paddingForSide('top'); - var paddingRight = paddingForSide('right'); - var paddingBottom = paddingForSide('bottom'); - var paddingLeft = paddingForSide('left'); - - var textAlign = { - name: 'text-align', - initialValue: 'left', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, textAlign) { - switch (textAlign) { - case 'right': - return 2 /* RIGHT */; - case 'center': - case 'justify': - return 1 /* CENTER */; - case 'left': - default: - return 0 /* LEFT */; - } - } - }; - - var position = { - name: 'position', - initialValue: 'static', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, position) { - switch (position) { - case 'relative': - return 1 /* RELATIVE */; - case 'absolute': - return 2 /* ABSOLUTE */; - case 'fixed': - return 3 /* FIXED */; - case 'sticky': - 
return 4 /* STICKY */; - } - return 0 /* STATIC */; - } - }; - - var textShadow = { - name: 'text-shadow', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (context, tokens) { - if (tokens.length === 1 && isIdentWithValue(tokens[0], 'none')) { - return []; - } - return parseFunctionArgs(tokens).map(function (values) { - var shadow = { - color: COLORS.TRANSPARENT, - offsetX: ZERO_LENGTH, - offsetY: ZERO_LENGTH, - blur: ZERO_LENGTH - }; - var c = 0; - for (var i = 0; i < values.length; i++) { - var token = values[i]; - if (isLength(token)) { - if (c === 0) { - shadow.offsetX = token; - } - else if (c === 1) { - shadow.offsetY = token; - } - else { - shadow.blur = token; - } - c++; - } - else { - shadow.color = color$1.parse(context, token); - } - } - return shadow; - }); - } - }; - - var textTransform = { - name: 'text-transform', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, textTransform) { - switch (textTransform) { - case 'uppercase': - return 2 /* UPPERCASE */; - case 'lowercase': - return 1 /* LOWERCASE */; - case 'capitalize': - return 3 /* CAPITALIZE */; - } - return 0 /* NONE */; - } - }; - - var transform$1 = { - name: 'transform', - initialValue: 'none', - prefix: true, - type: 0 /* VALUE */, - parse: function (_context, token) { - if (token.type === 20 /* IDENT_TOKEN */ && token.value === 'none') { - return null; - } - if (token.type === 18 /* FUNCTION */) { - var transformFunction = SUPPORTED_TRANSFORM_FUNCTIONS[token.name]; - if (typeof transformFunction === 'undefined') { - throw new Error("Attempting to parse an unsupported transform function \"" + token.name + "\""); - } - return transformFunction(token.values); - } - return null; - } - }; - var matrix = function (args) { - var values = args.filter(function (arg) { return arg.type === 17 /* NUMBER_TOKEN */; }).map(function (arg) { return arg.number; }); - return values.length === 6 ? 
values : null; - }; - // doesn't support 3D transforms at the moment - var matrix3d = function (args) { - var values = args.filter(function (arg) { return arg.type === 17 /* NUMBER_TOKEN */; }).map(function (arg) { return arg.number; }); - var a1 = values[0], b1 = values[1]; values[2]; values[3]; var a2 = values[4], b2 = values[5]; values[6]; values[7]; values[8]; values[9]; values[10]; values[11]; var a4 = values[12], b4 = values[13]; values[14]; values[15]; - return values.length === 16 ? [a1, b1, a2, b2, a4, b4] : null; - }; - var SUPPORTED_TRANSFORM_FUNCTIONS = { - matrix: matrix, - matrix3d: matrix3d - }; - - var DEFAULT_VALUE = { - type: 16 /* PERCENTAGE_TOKEN */, - number: 50, - flags: FLAG_INTEGER - }; - var DEFAULT = [DEFAULT_VALUE, DEFAULT_VALUE]; - var transformOrigin = { - name: 'transform-origin', - initialValue: '50% 50%', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - var origins = tokens.filter(isLengthPercentage); - if (origins.length !== 2) { - return DEFAULT; - } - return [origins[0], origins[1]]; - } - }; - - var visibility = { - name: 'visible', - initialValue: 'none', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, visibility) { - switch (visibility) { - case 'hidden': - return 1 /* HIDDEN */; - case 'collapse': - return 2 /* COLLAPSE */; - case 'visible': - default: - return 0 /* VISIBLE */; - } - } - }; - - var WORD_BREAK; - (function (WORD_BREAK) { - WORD_BREAK["NORMAL"] = "normal"; - WORD_BREAK["BREAK_ALL"] = "break-all"; - WORD_BREAK["KEEP_ALL"] = "keep-all"; - })(WORD_BREAK || (WORD_BREAK = {})); - var wordBreak = { - name: 'word-break', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, wordBreak) { - switch (wordBreak) { - case 'break-all': - return WORD_BREAK.BREAK_ALL; - case 'keep-all': - return WORD_BREAK.KEEP_ALL; - case 'normal': - default: - return WORD_BREAK.NORMAL; - } - } - }; - - var zIndex = { - name: 'z-index', - 
initialValue: 'auto', - prefix: false, - type: 0 /* VALUE */, - parse: function (_context, token) { - if (token.type === 20 /* IDENT_TOKEN */) { - return { auto: true, order: 0 }; - } - if (isNumberToken(token)) { - return { auto: false, order: token.number }; - } - throw new Error("Invalid z-index number parsed"); - } - }; - - var time = { - name: 'time', - parse: function (_context, value) { - if (value.type === 15 /* DIMENSION_TOKEN */) { - switch (value.unit.toLowerCase()) { - case 's': - return 1000 * value.number; - case 'ms': - return value.number; - } - } - throw new Error("Unsupported time type"); - } - }; - - var opacity = { - name: 'opacity', - initialValue: '1', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isNumberToken(token)) { - return token.number; - } - return 1; - } - }; - - var textDecorationColor = { - name: "text-decoration-color", - initialValue: 'transparent', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var textDecorationLine = { - name: 'text-decoration-line', - initialValue: 'none', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - return tokens - .filter(isIdentToken) - .map(function (token) { - switch (token.value) { - case 'underline': - return 1 /* UNDERLINE */; - case 'overline': - return 2 /* OVERLINE */; - case 'line-through': - return 3 /* LINE_THROUGH */; - case 'none': - return 4 /* BLINK */; - } - return 0 /* NONE */; - }) - .filter(function (line) { return line !== 0 /* NONE */; }); - } - }; - - var fontFamily = { - name: "font-family", - initialValue: '', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - var accumulator = []; - var results = []; - tokens.forEach(function (token) { - switch (token.type) { - case 20 /* IDENT_TOKEN */: - case 0 /* STRING_TOKEN */: - accumulator.push(token.value); - break; - case 17 /* NUMBER_TOKEN */: - accumulator.push(token.number.toString()); - break; - case 4 /* 
COMMA_TOKEN */: - results.push(accumulator.join(' ')); - accumulator.length = 0; - break; - } - }); - if (accumulator.length) { - results.push(accumulator.join(' ')); - } - return results.map(function (result) { return (result.indexOf(' ') === -1 ? result : "'" + result + "'"); }); - } - }; - - var fontSize = { - name: "font-size", - initialValue: '0', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'length' - }; - - var fontWeight = { - name: 'font-weight', - initialValue: 'normal', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isNumberToken(token)) { - return token.number; - } - if (isIdentToken(token)) { - switch (token.value) { - case 'bold': - return 700; - case 'normal': - default: - return 400; - } - } - return 400; - } - }; - - var fontVariant = { - name: 'font-variant', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (_context, tokens) { - return tokens.filter(isIdentToken).map(function (token) { return token.value; }); - } - }; - - var fontStyle = { - name: 'font-style', - initialValue: 'normal', - prefix: false, - type: 2 /* IDENT_VALUE */, - parse: function (_context, overflow) { - switch (overflow) { - case 'oblique': - return "oblique" /* OBLIQUE */; - case 'italic': - return "italic" /* ITALIC */; - case 'normal': - default: - return "normal" /* NORMAL */; - } - } - }; - - var contains = function (bit, value) { return (bit & value) !== 0; }; - - var content = { - name: 'content', - initialValue: 'none', - type: 1 /* LIST */, - prefix: false, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return []; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return []; - } - return tokens; - } - }; - - var counterIncrement = { - name: 'counter-increment', - initialValue: 'none', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return null; - } - var 
first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return null; - } - var increments = []; - var filtered = tokens.filter(nonWhiteSpace); - for (var i = 0; i < filtered.length; i++) { - var counter = filtered[i]; - var next = filtered[i + 1]; - if (counter.type === 20 /* IDENT_TOKEN */) { - var increment = next && isNumberToken(next) ? next.number : 1; - increments.push({ counter: counter.value, increment: increment }); - } - } - return increments; - } - }; - - var counterReset = { - name: 'counter-reset', - initialValue: 'none', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return []; - } - var resets = []; - var filtered = tokens.filter(nonWhiteSpace); - for (var i = 0; i < filtered.length; i++) { - var counter = filtered[i]; - var next = filtered[i + 1]; - if (isIdentToken(counter) && counter.value !== 'none') { - var reset = next && isNumberToken(next) ? next.number : 0; - resets.push({ counter: counter.value, reset: reset }); - } - } - return resets; - } - }; - - var duration = { - name: 'duration', - initialValue: '0s', - prefix: false, - type: 1 /* LIST */, - parse: function (context, tokens) { - return tokens.filter(isDimensionToken).map(function (token) { return time.parse(context, token); }); - } - }; - - var quotes = { - name: 'quotes', - initialValue: 'none', - prefix: true, - type: 1 /* LIST */, - parse: function (_context, tokens) { - if (tokens.length === 0) { - return null; - } - var first = tokens[0]; - if (first.type === 20 /* IDENT_TOKEN */ && first.value === 'none') { - return null; - } - var quotes = []; - var filtered = tokens.filter(isStringToken); - if (filtered.length % 2 !== 0) { - return null; - } - for (var i = 0; i < filtered.length; i += 2) { - var open_1 = filtered[i].value; - var close_1 = filtered[i + 1].value; - quotes.push({ open: open_1, close: close_1 }); - } - return quotes; - } - }; - var getQuote = function (quotes, depth, 
open) { - if (!quotes) { - return ''; - } - var quote = quotes[Math.min(depth, quotes.length - 1)]; - if (!quote) { - return ''; - } - return open ? quote.open : quote.close; - }; - - var paintOrder = { - name: 'paint-order', - initialValue: 'normal', - prefix: false, - type: 1 /* LIST */, - parse: function (_context, tokens) { - var DEFAULT_VALUE = [0 /* FILL */, 1 /* STROKE */, 2 /* MARKERS */]; - var layers = []; - tokens.filter(isIdentToken).forEach(function (token) { - switch (token.value) { - case 'stroke': - layers.push(1 /* STROKE */); - break; - case 'fill': - layers.push(0 /* FILL */); - break; - case 'markers': - layers.push(2 /* MARKERS */); - break; - } - }); - DEFAULT_VALUE.forEach(function (value) { - if (layers.indexOf(value) === -1) { - layers.push(value); - } - }); - return layers; - } - }; - - var webkitTextStrokeColor = { - name: "-webkit-text-stroke-color", - initialValue: 'currentcolor', - prefix: false, - type: 3 /* TYPE_VALUE */, - format: 'color' - }; - - var webkitTextStrokeWidth = { - name: "-webkit-text-stroke-width", - initialValue: '0', - type: 0 /* VALUE */, - prefix: false, - parse: function (_context, token) { - if (isDimensionToken(token)) { - return token.number; - } - return 0; - } - }; - - var CSSParsedDeclaration = /** @class */ (function () { - function CSSParsedDeclaration(context, declaration) { - var _a, _b; - this.animationDuration = parse(context, duration, declaration.animationDuration); - this.backgroundClip = parse(context, backgroundClip, declaration.backgroundClip); - this.backgroundColor = parse(context, backgroundColor, declaration.backgroundColor); - this.backgroundImage = parse(context, backgroundImage, declaration.backgroundImage); - this.backgroundOrigin = parse(context, backgroundOrigin, declaration.backgroundOrigin); - this.backgroundPosition = parse(context, backgroundPosition, declaration.backgroundPosition); - this.backgroundRepeat = parse(context, backgroundRepeat, declaration.backgroundRepeat); - 
this.backgroundSize = parse(context, backgroundSize, declaration.backgroundSize); - this.borderTopColor = parse(context, borderTopColor, declaration.borderTopColor); - this.borderRightColor = parse(context, borderRightColor, declaration.borderRightColor); - this.borderBottomColor = parse(context, borderBottomColor, declaration.borderBottomColor); - this.borderLeftColor = parse(context, borderLeftColor, declaration.borderLeftColor); - this.borderTopLeftRadius = parse(context, borderTopLeftRadius, declaration.borderTopLeftRadius); - this.borderTopRightRadius = parse(context, borderTopRightRadius, declaration.borderTopRightRadius); - this.borderBottomRightRadius = parse(context, borderBottomRightRadius, declaration.borderBottomRightRadius); - this.borderBottomLeftRadius = parse(context, borderBottomLeftRadius, declaration.borderBottomLeftRadius); - this.borderTopStyle = parse(context, borderTopStyle, declaration.borderTopStyle); - this.borderRightStyle = parse(context, borderRightStyle, declaration.borderRightStyle); - this.borderBottomStyle = parse(context, borderBottomStyle, declaration.borderBottomStyle); - this.borderLeftStyle = parse(context, borderLeftStyle, declaration.borderLeftStyle); - this.borderTopWidth = parse(context, borderTopWidth, declaration.borderTopWidth); - this.borderRightWidth = parse(context, borderRightWidth, declaration.borderRightWidth); - this.borderBottomWidth = parse(context, borderBottomWidth, declaration.borderBottomWidth); - this.borderLeftWidth = parse(context, borderLeftWidth, declaration.borderLeftWidth); - this.color = parse(context, color, declaration.color); - this.direction = parse(context, direction, declaration.direction); - this.display = parse(context, display, declaration.display); - this.float = parse(context, float, declaration.cssFloat); - this.fontFamily = parse(context, fontFamily, declaration.fontFamily); - this.fontSize = parse(context, fontSize, declaration.fontSize); - this.fontStyle = parse(context, fontStyle, 
declaration.fontStyle); - this.fontVariant = parse(context, fontVariant, declaration.fontVariant); - this.fontWeight = parse(context, fontWeight, declaration.fontWeight); - this.letterSpacing = parse(context, letterSpacing, declaration.letterSpacing); - this.lineBreak = parse(context, lineBreak, declaration.lineBreak); - this.lineHeight = parse(context, lineHeight, declaration.lineHeight); - this.listStyleImage = parse(context, listStyleImage, declaration.listStyleImage); - this.listStylePosition = parse(context, listStylePosition, declaration.listStylePosition); - this.listStyleType = parse(context, listStyleType, declaration.listStyleType); - this.marginTop = parse(context, marginTop, declaration.marginTop); - this.marginRight = parse(context, marginRight, declaration.marginRight); - this.marginBottom = parse(context, marginBottom, declaration.marginBottom); - this.marginLeft = parse(context, marginLeft, declaration.marginLeft); - this.opacity = parse(context, opacity, declaration.opacity); - var overflowTuple = parse(context, overflow, declaration.overflow); - this.overflowX = overflowTuple[0]; - this.overflowY = overflowTuple[overflowTuple.length > 1 ? 1 : 0]; - this.overflowWrap = parse(context, overflowWrap, declaration.overflowWrap); - this.paddingTop = parse(context, paddingTop, declaration.paddingTop); - this.paddingRight = parse(context, paddingRight, declaration.paddingRight); - this.paddingBottom = parse(context, paddingBottom, declaration.paddingBottom); - this.paddingLeft = parse(context, paddingLeft, declaration.paddingLeft); - this.paintOrder = parse(context, paintOrder, declaration.paintOrder); - this.position = parse(context, position, declaration.position); - this.textAlign = parse(context, textAlign, declaration.textAlign); - this.textDecorationColor = parse(context, textDecorationColor, (_a = declaration.textDecorationColor) !== null && _a !== void 0 ? 
_a : declaration.color); - this.textDecorationLine = parse(context, textDecorationLine, (_b = declaration.textDecorationLine) !== null && _b !== void 0 ? _b : declaration.textDecoration); - this.textShadow = parse(context, textShadow, declaration.textShadow); - this.textTransform = parse(context, textTransform, declaration.textTransform); - this.transform = parse(context, transform$1, declaration.transform); - this.transformOrigin = parse(context, transformOrigin, declaration.transformOrigin); - this.visibility = parse(context, visibility, declaration.visibility); - this.webkitTextStrokeColor = parse(context, webkitTextStrokeColor, declaration.webkitTextStrokeColor); - this.webkitTextStrokeWidth = parse(context, webkitTextStrokeWidth, declaration.webkitTextStrokeWidth); - this.wordBreak = parse(context, wordBreak, declaration.wordBreak); - this.zIndex = parse(context, zIndex, declaration.zIndex); - } - CSSParsedDeclaration.prototype.isVisible = function () { - return this.display > 0 && this.opacity > 0 && this.visibility === 0 /* VISIBLE */; - }; - CSSParsedDeclaration.prototype.isTransparent = function () { - return isTransparent(this.backgroundColor); - }; - CSSParsedDeclaration.prototype.isTransformed = function () { - return this.transform !== null; - }; - CSSParsedDeclaration.prototype.isPositioned = function () { - return this.position !== 0 /* STATIC */; - }; - CSSParsedDeclaration.prototype.isPositionedWithZIndex = function () { - return this.isPositioned() && !this.zIndex.auto; - }; - CSSParsedDeclaration.prototype.isFloating = function () { - return this.float !== 0 /* NONE */; - }; - CSSParsedDeclaration.prototype.isInlineLevel = function () { - return (contains(this.display, 4 /* INLINE */) || - contains(this.display, 33554432 /* INLINE_BLOCK */) || - contains(this.display, 268435456 /* INLINE_FLEX */) || - contains(this.display, 536870912 /* INLINE_GRID */) || - contains(this.display, 67108864 /* INLINE_LIST_ITEM */) || - contains(this.display, 
134217728 /* INLINE_TABLE */)); - }; - return CSSParsedDeclaration; - }()); - var CSSParsedPseudoDeclaration = /** @class */ (function () { - function CSSParsedPseudoDeclaration(context, declaration) { - this.content = parse(context, content, declaration.content); - this.quotes = parse(context, quotes, declaration.quotes); - } - return CSSParsedPseudoDeclaration; - }()); - var CSSParsedCounterDeclaration = /** @class */ (function () { - function CSSParsedCounterDeclaration(context, declaration) { - this.counterIncrement = parse(context, counterIncrement, declaration.counterIncrement); - this.counterReset = parse(context, counterReset, declaration.counterReset); - } - return CSSParsedCounterDeclaration; - }()); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var parse = function (context, descriptor, style) { - var tokenizer = new Tokenizer(); - var value = style !== null && typeof style !== 'undefined' ? style.toString() : descriptor.initialValue; - tokenizer.write(value); - var parser = new Parser(tokenizer.read()); - switch (descriptor.type) { - case 2 /* IDENT_VALUE */: - var token = parser.parseComponentValue(); - return descriptor.parse(context, isIdentToken(token) ? token.value : descriptor.initialValue); - case 0 /* VALUE */: - return descriptor.parse(context, parser.parseComponentValue()); - case 1 /* LIST */: - return descriptor.parse(context, parser.parseComponentValues()); - case 4 /* TOKEN_VALUE */: - return parser.parseComponentValue(); - case 3 /* TYPE_VALUE */: - switch (descriptor.format) { - case 'angle': - return angle.parse(context, parser.parseComponentValue()); - case 'color': - return color$1.parse(context, parser.parseComponentValue()); - case 'image': - return image.parse(context, parser.parseComponentValue()); - case 'length': - var length_1 = parser.parseComponentValue(); - return isLength(length_1) ? 
length_1 : ZERO_LENGTH; - case 'length-percentage': - var value_1 = parser.parseComponentValue(); - return isLengthPercentage(value_1) ? value_1 : ZERO_LENGTH; - case 'time': - return time.parse(context, parser.parseComponentValue()); - } - break; - } - }; - - var elementDebuggerAttribute = 'data-html2canvas-debug'; - var getElementDebugType = function (element) { - var attribute = element.getAttribute(elementDebuggerAttribute); - switch (attribute) { - case 'all': - return 1 /* ALL */; - case 'clone': - return 2 /* CLONE */; - case 'parse': - return 3 /* PARSE */; - case 'render': - return 4 /* RENDER */; - default: - return 0 /* NONE */; - } - }; - var isDebugging = function (element, type) { - var elementType = getElementDebugType(element); - return elementType === 1 /* ALL */ || type === elementType; - }; - - var ElementContainer = /** @class */ (function () { - function ElementContainer(context, element) { - this.context = context; - this.textNodes = []; - this.elements = []; - this.flags = 0; - if (isDebugging(element, 3 /* PARSE */)) { - debugger; - } - this.styles = new CSSParsedDeclaration(context, window.getComputedStyle(element, null)); - if (isHTMLElementNode(element)) { - if (this.styles.animationDuration.some(function (duration) { return duration > 0; })) { - element.style.animationDuration = '0s'; - } - if (this.styles.transform !== null) { - // getBoundingClientRect takes transforms into account - element.style.transform = 'none'; - } - } - this.bounds = parseBounds(this.context, element); - if (isDebugging(element, 4 /* RENDER */)) { - this.flags |= 16 /* DEBUG_RENDER */; - } - } - return ElementContainer; - }()); - - /* - * text-segmentation 1.0.3 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var base64 = 
'AAAAAAAAAAAAEA4AGBkAAFAaAAACAAAAAAAIABAAGAAwADgACAAQAAgAEAAIABAACAAQAAgAEAAIABAACAAQAAgAEAAIABAAQABIAEQATAAIABAACAAQAAgAEAAIABAAVABcAAgAEAAIABAACAAQAGAAaABwAHgAgACIAI4AlgAIABAAmwCjAKgAsAC2AL4AvQDFAMoA0gBPAVYBWgEIAAgACACMANoAYgFkAWwBdAF8AX0BhQGNAZUBlgGeAaMBlQGWAasBswF8AbsBwwF0AcsBYwHTAQgA2wG/AOMBdAF8AekB8QF0AfkB+wHiAHQBfAEIAAMC5gQIAAsCEgIIAAgAFgIeAggAIgIpAggAMQI5AkACygEIAAgASAJQAlgCYAIIAAgACAAKBQoFCgUTBRMFGQUrBSsFCAAIAAgACAAIAAgACAAIAAgACABdAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABoAmgCrwGvAQgAbgJ2AggAHgEIAAgACADnAXsCCAAIAAgAgwIIAAgACAAIAAgACACKAggAkQKZAggAPADJAAgAoQKkAqwCsgK6AsICCADJAggA0AIIAAgACAAIANYC3gIIAAgACAAIAAgACABAAOYCCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAkASoB+QIEAAgACAA8AEMCCABCBQgACABJBVAFCAAIAAgACAAIAAgACAAIAAgACABTBVoFCAAIAFoFCABfBWUFCAAIAAgACAAIAAgAbQUIAAgACAAIAAgACABzBXsFfQWFBYoFigWKBZEFigWKBYoFmAWfBaYFrgWxBbkFCAAIAAgACAAIAAgACAAIAAgACAAIAMEFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAMgFCADQBQgACAAIAAgACAAIAAgACAAIAAgACAAIAO4CCAAIAAgAiQAIAAgACABAAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAD0AggACAD8AggACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIANYFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAg
ACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAMDvwAIAAgAJAIIAAgACAAIAAgACAAIAAgACwMTAwgACAB9BOsEGwMjAwgAKwMyAwsFYgE3A/MEPwMIAEUDTQNRAwgAWQOsAGEDCAAIAAgACAAIAAgACABpAzQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BTo
FNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFOgU0BTUFNgU3BTgFOQU6BTQFNQU2BTcFOAU5BToFNAU1BTYFNwU4BTkFIQUoBSwFCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABtAwgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABMAEwACAAIAAgACAAIABgACAAIAAgACAC/AAgACAAyAQgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACAAIAAwAAgACAAIAAgACAAIAAgACAAIAAAARABIAAgACAAIABQASAAIAAgAIABwAEAAjgCIABsAqAC2AL0AigDQAtwC+IJIQqVAZUBWQqVAZUBlQGVAZUBlQGrC5UBlQGVAZUBlQGVAZUBlQGVAXsKlQGVAbAK6wsrDGUMpQzlDJUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZU
BlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAZUBlQGVAfAKAAuZA64AtwCJALoC6ADwAAgAuACgA/oEpgO6AqsD+AAIAAgAswMIAAgACAAIAIkAuwP5AfsBwwPLAwgACAAIAAgACADRA9kDCAAIAOED6QMIAAgACAAIAAgACADuA/YDCAAIAP4DyQAIAAgABgQIAAgAXQAOBAgACAAIAAgACAAIABMECAAIAAgACAAIAAgACAD8AAQBCAAIAAgAGgQiBCoECAExBAgAEAEIAAgACAAIAAgACAAIAAgACAAIAAgACAA4BAgACABABEYECAAIAAgATAQYAQgAVAQIAAgACAAIAAgACAAIAAgACAAIAFoECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAOQEIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAB+BAcACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAEABhgSMBAgACAAIAAgAlAQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAwAEAAQABAADAAMAAwADAAQABAAEAAQABAAEAAQABHATAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAdQMIAAgACAAIAAgACAAIAMkACAAIAAgAfQMIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACACFA4kDCAAIAAgACAAIAOcBCAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAIcDCAAIAAgACAAIAAgACAAIAAgACAAIAJEDCAAIAAgACADFAAg
ACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABgBAgAZgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAbAQCBXIECAAIAHkECAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACABAAJwEQACjBKoEsgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAC6BMIECAAIAAgACAAIAAgACABmBAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAxwQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAGYECAAIAAgAzgQIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBd0FXwUIAOIF6gXxBYoF3gT5BQAGCAaKBYoFigWKBYoFigWKBYoFigWKBYoFigXWBIoFigWKBYoFigWKBYoFigWKBYsFEAaKBYoFigWKBYoFigWKBRQGCACKBYoFigWKBQgACAAIANEECAAIABgGigUgBggAJgYIAC4GMwaKBYoF0wQ3Bj4GigWKBYoFigWKBYoFigWKBYoFigWKBYoFigUIAAgACAAIAAgACAAIAAgAigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWKBYoFigWLBf///////wQABAAEAAQABAAEAAQABAAEAAQAAwAEAAQAAgAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAQADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUAAAAFAAUAAAAFAAUAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUAAQAAAAUABQAFAAUABQAFAAAAAAAFAAUAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAFAAUAAQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAAABwAHAAcAAAAHAAcABwAFAAEAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAcABwAFAAUAAAAAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAAAAQABAAAAAAAAAAAAAAAFAAUABQAFAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAHAAcAAAAHAAcAAAAAAAUABQAHAAUAAQAHAAEABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwABAAUABQAFAAUAAAAAAAAAAAAAAAEAAQABAAEAAQABAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUAAAAAAAAAAAAAAAA
ABQAFAAUABQAFAAUAAQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABQANAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAEAAQABAAEAAQABAAEAAQABAAEAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAABQAHAAUABQAFAAAAAAAAAAcABQAFAAUABQAFAAQABAAEAAQABAAEAAQABAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAEAAQABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUAAAAFAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAUAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAcABwAFAAcABwAAAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUABwAHAAUABQAFAAUAAAAAAAcABwAAAAAABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAABQAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAAAAAAAAAAABQAFAAAAAAAFAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAFAAUABQAFAAUAAAAFAAUABwAAAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABwAFAAUABQAFAAAAAAAHAAcAAAAAAAcABwAFAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAc
ABwAAAAAAAAAHAAcABwAAAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAABQAHAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAUABQAFAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAHAAcABQAHAAcAAAAFAAcABwAAAAcABwAFAAUAAAAAAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAFAAcABwAFAAUABQAAAAUAAAAHAAcABwAHAAcABwAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAHAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAABwAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAUAAAAFAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABwAFAAUABQAFAAUAAAAFAAUAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABwAFAAUABQAFAAUABQAAAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABQAFAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABQAFAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAHAAUABQAFAAUABQAFAAUABwAHAAcABwAHAAcABwAHAAUABwAHAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABwAHAAcABwAFAAUABwAHAAcAAAAAAAAAAAAHAAcABQAHAAcABwAHAAcABwAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAABQAFAAcABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAUABQAFAAUABQAFAAUAAAAFAAAABQAAAAAABQAFAAUABQAFAAUABQAFAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAFAAUAAAAAAAUABQAFAAUABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABwAFAAcABwAHAAcABwAFAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAUABQAFAAUABwAHAAUABQAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABQAFAAcABwAHAAUABwAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAcABQAFAAUABQAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAAAAAABwAFAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAAAAAAAAAFAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAUABQAHAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAUABQAFAAUABQAHAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAcABwAFAAUABQAFAAcABwAFAAUABwAHAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAFAAcABwAFAAUABwAHAAU
ABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAFAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAFAAUABQAAAAAABQAFAAAAAAAAAAAAAAAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAcABwAAAAAAAAAAAAAABwAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAcABwAFAAcABwAAAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAFAAUABQAAAAUABQAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABwAFAAUABQAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAUABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAHAAcABQAHAAUABQAAAAAAAAAAAAAAAAAFAAAABwAHAAcABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAHAAcABwAAAAAABwAHAAAAAAAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABwAHAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAFAAUABwAFAAcABwAFAAcABQAFAAcABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAHAAcABQAFAAUABQAAAAAABwAHAAcABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAHAAUABQAFAAUABQAFAAUABQAHAAcABQAHAAU
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABwAFAAcABwAFAAUABQAFAAUABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAcABwAFAAUABQAFAAcABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAUABQAFAAUABQAHAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAFAAUABQAFAAAAAAAFAAUABwAHAAcABwAFAAAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABwAHAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAcABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUAAAAHAAUABQAFAAUABQAFAAUABwAFAAUABwAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUAAAAAAAAABQAAAAUABQAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAHAAcABwAHAAcAAAAFAAUAAAAHAAcABQAHAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAAAAUABQAFAAAAAAAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAFAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAFAAUABQAAAAAABQAFAAU
ABQAFAAUABQAAAAUABQAAAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFAAUABQAFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQAFAAUABQAFAAUABQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAFAAUABQAFAAUADgAOAA4ADgAOAA4ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAA8ADwAPAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAcABwAHAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAgACAAIAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAMAAwADAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkACQAJAAkAAAAAAAAAAAAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAKAAoACgAAAAAAAAAAAAsADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwACwAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAMAAwADAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAADgAOAA4AAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAAAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4AAAAOAAAAAAAAAAAAAAAAAA4AAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAAAAAAAAAAAA4AAAAOAAAAAAAAAAAADgAOAA4AAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAA4AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAAAA4ADgAOAA4ADgAOAA4ADgAOAAAADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4ADgAOAAAAAAAAAAAAAAAAAAAAAAAAAAAADgAOAA4ADgAOAA4AAAAAAAAAAAAAAAAAAAAAAA4ADgAOAA4ADgAOAA4ADgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOAA4ADgAOAA4ADgAOAA4
ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAOAA4ADgAOAA4ADgAAAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4ADgAOAA4AAAAAAAAAAAA='; - - /* - * utrie 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars$1 = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup$1 = typeof Uint8Array === 'undefined' ? [] : new Uint8Array(256); - for (var i$1 = 0; i$1 < chars$1.length; i$1++) { - lookup$1[chars$1.charCodeAt(i$1)] = i$1; - } - var decode = function (base64) { - var bufferLength = base64.length * 0.75, len = base64.length, i, p = 0, encoded1, encoded2, encoded3, encoded4; - if (base64[base64.length - 1] === '=') { - bufferLength--; - if (base64[base64.length - 2] === '=') { - bufferLength--; - } - } - var buffer = typeof ArrayBuffer !== 'undefined' && - typeof Uint8Array !== 'undefined' && - typeof Uint8Array.prototype.slice !== 'undefined' - ? new ArrayBuffer(bufferLength) - : new Array(bufferLength); - var bytes = Array.isArray(buffer) ? 
buffer : new Uint8Array(buffer); - for (i = 0; i < len; i += 4) { - encoded1 = lookup$1[base64.charCodeAt(i)]; - encoded2 = lookup$1[base64.charCodeAt(i + 1)]; - encoded3 = lookup$1[base64.charCodeAt(i + 2)]; - encoded4 = lookup$1[base64.charCodeAt(i + 3)]; - bytes[p++] = (encoded1 << 2) | (encoded2 >> 4); - bytes[p++] = ((encoded2 & 15) << 4) | (encoded3 >> 2); - bytes[p++] = ((encoded3 & 3) << 6) | (encoded4 & 63); - } - return buffer; - }; - var polyUint16Array = function (buffer) { - var length = buffer.length; - var bytes = []; - for (var i = 0; i < length; i += 2) { - bytes.push((buffer[i + 1] << 8) | buffer[i]); - } - return bytes; - }; - var polyUint32Array = function (buffer) { - var length = buffer.length; - var bytes = []; - for (var i = 0; i < length; i += 4) { - bytes.push((buffer[i + 3] << 24) | (buffer[i + 2] << 16) | (buffer[i + 1] << 8) | buffer[i]); - } - return bytes; - }; - - /** Shift size for getting the index-2 table offset. */ - var UTRIE2_SHIFT_2 = 5; - /** Shift size for getting the index-1 table offset. */ - var UTRIE2_SHIFT_1 = 6 + 5; - /** - * Shift size for shifting left the index array values. - * Increases possible data size with 16-bit index values at the cost - * of compactability. - * This requires data blocks to be aligned by UTRIE2_DATA_GRANULARITY. - */ - var UTRIE2_INDEX_SHIFT = 2; - /** - * Difference between the two shift sizes, - * for getting an index-1 offset from an index-2 offset. 6=11-5 - */ - var UTRIE2_SHIFT_1_2 = UTRIE2_SHIFT_1 - UTRIE2_SHIFT_2; - /** - * The part of the index-2 table for U+D800..U+DBFF stores values for - * lead surrogate code _units_ not code _points_. - * Values for lead surrogate code _points_ are indexed with this portion of the table. - * Length=32=0x20=0x400>>UTRIE2_SHIFT_2. (There are 1024=0x400 lead surrogates.) - */ - var UTRIE2_LSCP_INDEX_2_OFFSET = 0x10000 >> UTRIE2_SHIFT_2; - /** Number of entries in a data block. 
32=0x20 */ - var UTRIE2_DATA_BLOCK_LENGTH = 1 << UTRIE2_SHIFT_2; - /** Mask for getting the lower bits for the in-data-block offset. */ - var UTRIE2_DATA_MASK = UTRIE2_DATA_BLOCK_LENGTH - 1; - var UTRIE2_LSCP_INDEX_2_LENGTH = 0x400 >> UTRIE2_SHIFT_2; - /** Count the lengths of both BMP pieces. 2080=0x820 */ - var UTRIE2_INDEX_2_BMP_LENGTH = UTRIE2_LSCP_INDEX_2_OFFSET + UTRIE2_LSCP_INDEX_2_LENGTH; - /** - * The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820. - * Length 32=0x20 for lead bytes C0..DF, regardless of UTRIE2_SHIFT_2. - */ - var UTRIE2_UTF8_2B_INDEX_2_OFFSET = UTRIE2_INDEX_2_BMP_LENGTH; - var UTRIE2_UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6; /* U+0800 is the first code point after 2-byte UTF-8 */ - /** - * The index-1 table, only used for supplementary code points, at offset 2112=0x840. - * Variable length, for code points up to highStart, where the last single-value range starts. - * Maximum length 512=0x200=0x100000>>UTRIE2_SHIFT_1. - * (For 0x100000 supplementary code points U+10000..U+10ffff.) - * - * The part of the index-2 table for supplementary code points starts - * after this index-1 table. - * - * Both the index-1 table and the following part of the index-2 table - * are omitted completely if there is only BMP data. - */ - var UTRIE2_INDEX_1_OFFSET = UTRIE2_UTF8_2B_INDEX_2_OFFSET + UTRIE2_UTF8_2B_INDEX_2_LENGTH; - /** - * Number of index-1 entries for the BMP. 32=0x20 - * This part of the index-1 table is omitted from the serialized form. - */ - var UTRIE2_OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> UTRIE2_SHIFT_1; - /** Number of entries in an index-2 block. 64=0x40 */ - var UTRIE2_INDEX_2_BLOCK_LENGTH = 1 << UTRIE2_SHIFT_1_2; - /** Mask for getting the lower bits for the in-index-2-block offset. 
*/ - var UTRIE2_INDEX_2_MASK = UTRIE2_INDEX_2_BLOCK_LENGTH - 1; - var slice16 = function (view, start, end) { - if (view.slice) { - return view.slice(start, end); - } - return new Uint16Array(Array.prototype.slice.call(view, start, end)); - }; - var slice32 = function (view, start, end) { - if (view.slice) { - return view.slice(start, end); - } - return new Uint32Array(Array.prototype.slice.call(view, start, end)); - }; - var createTrieFromBase64 = function (base64, _byteLength) { - var buffer = decode(base64); - var view32 = Array.isArray(buffer) ? polyUint32Array(buffer) : new Uint32Array(buffer); - var view16 = Array.isArray(buffer) ? polyUint16Array(buffer) : new Uint16Array(buffer); - var headerLength = 24; - var index = slice16(view16, headerLength / 2, view32[4] / 2); - var data = view32[5] === 2 - ? slice16(view16, (headerLength + view32[4]) / 2) - : slice32(view32, Math.ceil((headerLength + view32[4]) / 4)); - return new Trie(view32[0], view32[1], view32[2], view32[3], index, data); - }; - var Trie = /** @class */ (function () { - function Trie(initialValue, errorValue, highStart, highValueIndex, index, data) { - this.initialValue = initialValue; - this.errorValue = errorValue; - this.highStart = highStart; - this.highValueIndex = highValueIndex; - this.index = index; - this.data = data; - } - /** - * Get the value for a code point as stored in the Trie. - * - * @param codePoint the code point - * @return the value - */ - Trie.prototype.get = function (codePoint) { - var ix; - if (codePoint >= 0) { - if (codePoint < 0x0d800 || (codePoint > 0x0dbff && codePoint <= 0x0ffff)) { - // Ordinary BMP code point, excluding leading surrogates. - // BMP uses a single level lookup. BMP index starts at offset 0 in the Trie2 index. - // 16 bit data is stored in the index array itself. 
- ix = this.index[codePoint >> UTRIE2_SHIFT_2]; - ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK); - return this.data[ix]; - } - if (codePoint <= 0xffff) { - // Lead Surrogate Code Point. A Separate index section is stored for - // lead surrogate code units and code points. - // The main index has the code unit data. - // For this function, we need the code point data. - // Note: this expression could be refactored for slightly improved efficiency, but - // surrogate code points will be so rare in practice that it's not worth it. - ix = this.index[UTRIE2_LSCP_INDEX_2_OFFSET + ((codePoint - 0xd800) >> UTRIE2_SHIFT_2)]; - ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK); - return this.data[ix]; - } - if (codePoint < this.highStart) { - // Supplemental code point, use two-level lookup. - ix = UTRIE2_INDEX_1_OFFSET - UTRIE2_OMITTED_BMP_INDEX_1_LENGTH + (codePoint >> UTRIE2_SHIFT_1); - ix = this.index[ix]; - ix += (codePoint >> UTRIE2_SHIFT_2) & UTRIE2_INDEX_2_MASK; - ix = this.index[ix]; - ix = (ix << UTRIE2_INDEX_SHIFT) + (codePoint & UTRIE2_DATA_MASK); - return this.data[ix]; - } - if (codePoint <= 0x10ffff) { - return this.data[this.highValueIndex]; - } - } - // Fall through. The code point is outside of the legal range of 0..0x10ffff. - return this.errorValue; - }; - return Trie; - }()); - - /* - * base64-arraybuffer 1.0.2 - * Copyright (c) 2022 Niklas von Hertzen - * Released under MIT License - */ - var chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'; - // Use a lookup table to find the index. - var lookup = typeof Uint8Array === 'undefined' ? 
[] : new Uint8Array(256); - for (var i = 0; i < chars.length; i++) { - lookup[chars.charCodeAt(i)] = i; - } - - var Prepend = 1; - var CR = 2; - var LF = 3; - var Control = 4; - var Extend = 5; - var SpacingMark = 7; - var L = 8; - var V = 9; - var T = 10; - var LV = 11; - var LVT = 12; - var ZWJ = 13; - var Extended_Pictographic = 14; - var RI = 15; - var toCodePoints = function (str) { - var codePoints = []; - var i = 0; - var length = str.length; - while (i < length) { - var value = str.charCodeAt(i++); - if (value >= 0xd800 && value <= 0xdbff && i < length) { - var extra = str.charCodeAt(i++); - if ((extra & 0xfc00) === 0xdc00) { - codePoints.push(((value & 0x3ff) << 10) + (extra & 0x3ff) + 0x10000); - } - else { - codePoints.push(value); - i--; - } - } - else { - codePoints.push(value); - } - } - return codePoints; - }; - var fromCodePoint = function () { - var codePoints = []; - for (var _i = 0; _i < arguments.length; _i++) { - codePoints[_i] = arguments[_i]; - } - if (String.fromCodePoint) { - return String.fromCodePoint.apply(String, codePoints); - } - var length = codePoints.length; - if (!length) { - return ''; - } - var codeUnits = []; - var index = -1; - var result = ''; - while (++index < length) { - var codePoint = codePoints[index]; - if (codePoint <= 0xffff) { - codeUnits.push(codePoint); - } - else { - codePoint -= 0x10000; - codeUnits.push((codePoint >> 10) + 0xd800, (codePoint % 0x400) + 0xdc00); - } - if (index + 1 === length || codeUnits.length > 0x4000) { - result += String.fromCharCode.apply(String, codeUnits); - codeUnits.length = 0; - } - } - return result; - }; - var UnicodeTrie = createTrieFromBase64(base64); - var BREAK_NOT_ALLOWED = '×'; - var BREAK_ALLOWED = '÷'; - var codePointToClass = function (codePoint) { return UnicodeTrie.get(codePoint); }; - var _graphemeBreakAtIndex = function (_codePoints, classTypes, index) { - var prevIndex = index - 2; - var prev = classTypes[prevIndex]; - var current = classTypes[index - 1]; - var next = 
classTypes[index]; - // GB3 Do not break between a CR and LF - if (current === CR && next === LF) { - return BREAK_NOT_ALLOWED; - } - // GB4 Otherwise, break before and after controls. - if (current === CR || current === LF || current === Control) { - return BREAK_ALLOWED; - } - // GB5 - if (next === CR || next === LF || next === Control) { - return BREAK_ALLOWED; - } - // Do not break Hangul syllable sequences. - // GB6 - if (current === L && [L, V, LV, LVT].indexOf(next) !== -1) { - return BREAK_NOT_ALLOWED; - } - // GB7 - if ((current === LV || current === V) && (next === V || next === T)) { - return BREAK_NOT_ALLOWED; - } - // GB8 - if ((current === LVT || current === T) && next === T) { - return BREAK_NOT_ALLOWED; - } - // GB9 Do not break before extending characters or ZWJ. - if (next === ZWJ || next === Extend) { - return BREAK_NOT_ALLOWED; - } - // Do not break before SpacingMarks, or after Prepend characters. - // GB9a - if (next === SpacingMark) { - return BREAK_NOT_ALLOWED; - } - // GB9a - if (current === Prepend) { - return BREAK_NOT_ALLOWED; - } - // GB11 Do not break within emoji modifier sequences or emoji zwj sequences. - if (current === ZWJ && next === Extended_Pictographic) { - while (prev === Extend) { - prev = classTypes[--prevIndex]; - } - if (prev === Extended_Pictographic) { - return BREAK_NOT_ALLOWED; - } - } - // GB12 Do not break within emoji flag sequences. - // That is, do not break between regional indicator (RI) symbols - // if there is an odd number of RI characters before the break point. 
- if (current === RI && next === RI) { - var countRI = 0; - while (prev === RI) { - countRI++; - prev = classTypes[--prevIndex]; - } - if (countRI % 2 === 0) { - return BREAK_NOT_ALLOWED; - } - } - return BREAK_ALLOWED; - }; - var GraphemeBreaker = function (str) { - var codePoints = toCodePoints(str); - var length = codePoints.length; - var index = 0; - var lastEnd = 0; - var classTypes = codePoints.map(codePointToClass); - return { - next: function () { - if (index >= length) { - return { done: true, value: null }; - } - var graphemeBreak = BREAK_NOT_ALLOWED; - while (index < length && - (graphemeBreak = _graphemeBreakAtIndex(codePoints, classTypes, ++index)) === BREAK_NOT_ALLOWED) { } - if (graphemeBreak !== BREAK_NOT_ALLOWED || index === length) { - var value = fromCodePoint.apply(null, codePoints.slice(lastEnd, index)); - lastEnd = index; - return { value: value, done: false }; - } - return { done: true, value: null }; - }, - }; - }; - var splitGraphemes = function (str) { - var breaker = GraphemeBreaker(str); - var graphemes = []; - var bk; - while (!(bk = breaker.next()).done) { - if (bk.value) { - graphemes.push(bk.value.slice()); - } - } - return graphemes; - }; - - var testRangeBounds = function (document) { - var TEST_HEIGHT = 123; - if (document.createRange) { - var range = document.createRange(); - if (range.getBoundingClientRect) { - var testElement = document.createElement('boundtest'); - testElement.style.height = TEST_HEIGHT + "px"; - testElement.style.display = 'block'; - document.body.appendChild(testElement); - range.selectNode(testElement); - var rangeBounds = range.getBoundingClientRect(); - var rangeHeight = Math.round(rangeBounds.height); - document.body.removeChild(testElement); - if (rangeHeight === TEST_HEIGHT) { - return true; - } - } - } - return false; - }; - var testIOSLineBreak = function (document) { - var testElement = document.createElement('boundtest'); - testElement.style.width = '50px'; - testElement.style.display = 'block'; - 
testElement.style.fontSize = '12px'; - testElement.style.letterSpacing = '0px'; - testElement.style.wordSpacing = '0px'; - document.body.appendChild(testElement); - var range = document.createRange(); - testElement.innerHTML = typeof ''.repeat === 'function' ? '👨'.repeat(10) : ''; - var node = testElement.firstChild; - var textList = toCodePoints$1(node.data).map(function (i) { return fromCodePoint$1(i); }); - var offset = 0; - var prev = {}; - // ios 13 does not handle range getBoundingClientRect line changes correctly #2177 - var supports = textList.every(function (text, i) { - range.setStart(node, offset); - range.setEnd(node, offset + text.length); - var rect = range.getBoundingClientRect(); - offset += text.length; - var boundAhead = rect.x > prev.x || rect.y > prev.y; - prev = rect; - if (i === 0) { - return true; - } - return boundAhead; - }); - document.body.removeChild(testElement); - return supports; - }; - var testCORS = function () { return typeof new Image().crossOrigin !== 'undefined'; }; - var testResponseType = function () { return typeof new XMLHttpRequest().responseType === 'string'; }; - var testSVG = function (document) { - var img = new Image(); - var canvas = document.createElement('canvas'); - var ctx = canvas.getContext('2d'); - if (!ctx) { - return false; - } - img.src = "data:image/svg+xml,"; - try { - ctx.drawImage(img, 0, 0); - canvas.toDataURL(); - } - catch (e) { - return false; - } - return true; - }; - var isGreenPixel = function (data) { - return data[0] === 0 && data[1] === 255 && data[2] === 0 && data[3] === 255; - }; - var testForeignObject = function (document) { - var canvas = document.createElement('canvas'); - var size = 100; - canvas.width = size; - canvas.height = size; - var ctx = canvas.getContext('2d'); - if (!ctx) { - return Promise.reject(false); - } - ctx.fillStyle = 'rgb(0, 255, 0)'; - ctx.fillRect(0, 0, size, size); - var img = new Image(); - var greenImageSrc = canvas.toDataURL(); - img.src = greenImageSrc; - var 
svg = createForeignObjectSVG(size, size, 0, 0, img); - ctx.fillStyle = 'red'; - ctx.fillRect(0, 0, size, size); - return loadSerializedSVG$1(svg) - .then(function (img) { - ctx.drawImage(img, 0, 0); - var data = ctx.getImageData(0, 0, size, size).data; - ctx.fillStyle = 'red'; - ctx.fillRect(0, 0, size, size); - var node = document.createElement('div'); - node.style.backgroundImage = "url(" + greenImageSrc + ")"; - node.style.height = size + "px"; - // Firefox 55 does not render inline tags - return isGreenPixel(data) - ? loadSerializedSVG$1(createForeignObjectSVG(size, size, 0, 0, node)) - : Promise.reject(false); - }) - .then(function (img) { - ctx.drawImage(img, 0, 0); - // Edge does not render background-images - return isGreenPixel(ctx.getImageData(0, 0, size, size).data); - }) - .catch(function () { return false; }); - }; - var createForeignObjectSVG = function (width, height, x, y, node) { - var xmlns = 'http://www.w3.org/2000/svg'; - var svg = document.createElementNS(xmlns, 'svg'); - var foreignObject = document.createElementNS(xmlns, 'foreignObject'); - svg.setAttributeNS(null, 'width', width.toString()); - svg.setAttributeNS(null, 'height', height.toString()); - foreignObject.setAttributeNS(null, 'width', '100%'); - foreignObject.setAttributeNS(null, 'height', '100%'); - foreignObject.setAttributeNS(null, 'x', x.toString()); - foreignObject.setAttributeNS(null, 'y', y.toString()); - foreignObject.setAttributeNS(null, 'externalResourcesRequired', 'true'); - svg.appendChild(foreignObject); - foreignObject.appendChild(node); - return svg; - }; - var loadSerializedSVG$1 = function (svg) { - return new Promise(function (resolve, reject) { - var img = new Image(); - img.onload = function () { return resolve(img); }; - img.onerror = reject; - img.src = "data:image/svg+xml;charset=utf-8," + encodeURIComponent(new XMLSerializer().serializeToString(svg)); - }); - }; - var FEATURES = { - get SUPPORT_RANGE_BOUNDS() { - var value = testRangeBounds(document); - 
Object.defineProperty(FEATURES, 'SUPPORT_RANGE_BOUNDS', { value: value }); - return value; - }, - get SUPPORT_WORD_BREAKING() { - var value = FEATURES.SUPPORT_RANGE_BOUNDS && testIOSLineBreak(document); - Object.defineProperty(FEATURES, 'SUPPORT_WORD_BREAKING', { value: value }); - return value; - }, - get SUPPORT_SVG_DRAWING() { - var value = testSVG(document); - Object.defineProperty(FEATURES, 'SUPPORT_SVG_DRAWING', { value: value }); - return value; - }, - get SUPPORT_FOREIGNOBJECT_DRAWING() { - var value = typeof Array.from === 'function' && typeof window.fetch === 'function' - ? testForeignObject(document) - : Promise.resolve(false); - Object.defineProperty(FEATURES, 'SUPPORT_FOREIGNOBJECT_DRAWING', { value: value }); - return value; - }, - get SUPPORT_CORS_IMAGES() { - var value = testCORS(); - Object.defineProperty(FEATURES, 'SUPPORT_CORS_IMAGES', { value: value }); - return value; - }, - get SUPPORT_RESPONSE_TYPE() { - var value = testResponseType(); - Object.defineProperty(FEATURES, 'SUPPORT_RESPONSE_TYPE', { value: value }); - return value; - }, - get SUPPORT_CORS_XHR() { - var value = 'withCredentials' in new XMLHttpRequest(); - Object.defineProperty(FEATURES, 'SUPPORT_CORS_XHR', { value: value }); - return value; - }, - get SUPPORT_NATIVE_TEXT_SEGMENTATION() { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var value = !!(typeof Intl !== 'undefined' && Intl.Segmenter); - Object.defineProperty(FEATURES, 'SUPPORT_NATIVE_TEXT_SEGMENTATION', { value: value }); - return value; - } - }; - - var TextBounds = /** @class */ (function () { - function TextBounds(text, bounds) { - this.text = text; - this.bounds = bounds; - } - return TextBounds; - }()); - var parseTextBounds = function (context, value, styles, node) { - var textList = breakText(value, styles); - var textBounds = []; - var offset = 0; - textList.forEach(function (text) { - if (styles.textDecorationLine.length || text.trim().length > 0) { - if (FEATURES.SUPPORT_RANGE_BOUNDS) { - 
var clientRects = createRange(node, offset, text.length).getClientRects(); - if (clientRects.length > 1) { - var subSegments = segmentGraphemes(text); - var subOffset_1 = 0; - subSegments.forEach(function (subSegment) { - textBounds.push(new TextBounds(subSegment, Bounds.fromDOMRectList(context, createRange(node, subOffset_1 + offset, subSegment.length).getClientRects()))); - subOffset_1 += subSegment.length; - }); - } - else { - textBounds.push(new TextBounds(text, Bounds.fromDOMRectList(context, clientRects))); - } - } - else { - var replacementNode = node.splitText(text.length); - textBounds.push(new TextBounds(text, getWrapperBounds(context, node))); - node = replacementNode; - } - } - else if (!FEATURES.SUPPORT_RANGE_BOUNDS) { - node = node.splitText(text.length); - } - offset += text.length; - }); - return textBounds; - }; - var getWrapperBounds = function (context, node) { - var ownerDocument = node.ownerDocument; - if (ownerDocument) { - var wrapper = ownerDocument.createElement('html2canvaswrapper'); - wrapper.appendChild(node.cloneNode(true)); - var parentNode = node.parentNode; - if (parentNode) { - parentNode.replaceChild(wrapper, node); - var bounds = parseBounds(context, wrapper); - if (wrapper.firstChild) { - parentNode.replaceChild(wrapper.firstChild, wrapper); - } - return bounds; - } - } - return Bounds.EMPTY; - }; - var createRange = function (node, offset, length) { - var ownerDocument = node.ownerDocument; - if (!ownerDocument) { - throw new Error('Node has no owner document'); - } - var range = ownerDocument.createRange(); - range.setStart(node, offset); - range.setEnd(node, offset + length); - return range; - }; - var segmentGraphemes = function (value) { - if (FEATURES.SUPPORT_NATIVE_TEXT_SEGMENTATION) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var segmenter = new Intl.Segmenter(void 0, { granularity: 'grapheme' }); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return 
Array.from(segmenter.segment(value)).map(function (segment) { return segment.segment; }); - } - return splitGraphemes(value); - }; - var segmentWords = function (value, styles) { - if (FEATURES.SUPPORT_NATIVE_TEXT_SEGMENTATION) { - // eslint-disable-next-line @typescript-eslint/no-explicit-any - var segmenter = new Intl.Segmenter(void 0, { - granularity: 'word' - }); - // eslint-disable-next-line @typescript-eslint/no-explicit-any - return Array.from(segmenter.segment(value)).map(function (segment) { return segment.segment; }); - } - return breakWords(value, styles); - }; - var breakText = function (value, styles) { - return styles.letterSpacing !== 0 ? segmentGraphemes(value) : segmentWords(value, styles); - }; - // https://drafts.csswg.org/css-text/#word-separator - var wordSeparators = [0x0020, 0x00a0, 0x1361, 0x10100, 0x10101, 0x1039, 0x1091]; - var breakWords = function (str, styles) { - var breaker = LineBreaker(str, { - lineBreak: styles.lineBreak, - wordBreak: styles.overflowWrap === "break-word" /* BREAK_WORD */ ? 
'break-word' : styles.wordBreak - }); - var words = []; - var bk; - var _loop_1 = function () { - if (bk.value) { - var value = bk.value.slice(); - var codePoints = toCodePoints$1(value); - var word_1 = ''; - codePoints.forEach(function (codePoint) { - if (wordSeparators.indexOf(codePoint) === -1) { - word_1 += fromCodePoint$1(codePoint); - } - else { - if (word_1.length) { - words.push(word_1); - } - words.push(fromCodePoint$1(codePoint)); - word_1 = ''; - } - }); - if (word_1.length) { - words.push(word_1); - } - } - }; - while (!(bk = breaker.next()).done) { - _loop_1(); - } - return words; - }; - - var TextContainer = /** @class */ (function () { - function TextContainer(context, node, styles) { - this.text = transform(node.data, styles.textTransform); - this.textBounds = parseTextBounds(context, this.text, styles, node); - } - return TextContainer; - }()); - var transform = function (text, transform) { - switch (transform) { - case 1 /* LOWERCASE */: - return text.toLowerCase(); - case 3 /* CAPITALIZE */: - return text.replace(CAPITALIZE, capitalize); - case 2 /* UPPERCASE */: - return text.toUpperCase(); - default: - return text; - } - }; - var CAPITALIZE = /(^|\s|:|-|\(|\))([a-z])/g; - var capitalize = function (m, p1, p2) { - if (m.length > 0) { - return p1 + p2.toUpperCase(); - } - return m; - }; - - var ImageElementContainer = /** @class */ (function (_super) { - __extends(ImageElementContainer, _super); - function ImageElementContainer(context, img) { - var _this = _super.call(this, context, img) || this; - _this.src = img.currentSrc || img.src; - _this.intrinsicWidth = img.naturalWidth; - _this.intrinsicHeight = img.naturalHeight; - _this.context.cache.addImage(_this.src); - return _this; - } - return ImageElementContainer; - }(ElementContainer)); - - var CanvasElementContainer = /** @class */ (function (_super) { - __extends(CanvasElementContainer, _super); - function CanvasElementContainer(context, canvas) { - var _this = _super.call(this, context, 
canvas) || this; - _this.canvas = canvas; - _this.intrinsicWidth = canvas.width; - _this.intrinsicHeight = canvas.height; - return _this; - } - return CanvasElementContainer; - }(ElementContainer)); - - var SVGElementContainer = /** @class */ (function (_super) { - __extends(SVGElementContainer, _super); - function SVGElementContainer(context, img) { - var _this = _super.call(this, context, img) || this; - var s = new XMLSerializer(); - var bounds = parseBounds(context, img); - img.setAttribute('width', bounds.width + "px"); - img.setAttribute('height', bounds.height + "px"); - _this.svg = "data:image/svg+xml," + encodeURIComponent(s.serializeToString(img)); - _this.intrinsicWidth = img.width.baseVal.value; - _this.intrinsicHeight = img.height.baseVal.value; - _this.context.cache.addImage(_this.svg); - return _this; - } - return SVGElementContainer; - }(ElementContainer)); - - var LIElementContainer = /** @class */ (function (_super) { - __extends(LIElementContainer, _super); - function LIElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - _this.value = element.value; - return _this; - } - return LIElementContainer; - }(ElementContainer)); - - var OLElementContainer = /** @class */ (function (_super) { - __extends(OLElementContainer, _super); - function OLElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - _this.start = element.start; - _this.reversed = typeof element.reversed === 'boolean' && element.reversed === true; - return _this; - } - return OLElementContainer; - }(ElementContainer)); - - var CHECKBOX_BORDER_RADIUS = [ - { - type: 15 /* DIMENSION_TOKEN */, - flags: 0, - unit: 'px', - number: 3 - } - ]; - var RADIO_BORDER_RADIUS = [ - { - type: 16 /* PERCENTAGE_TOKEN */, - flags: 0, - number: 50 - } - ]; - var reformatInputBounds = function (bounds) { - if (bounds.width > bounds.height) { - return new Bounds(bounds.left + (bounds.width - bounds.height) / 2, bounds.top, 
bounds.height, bounds.height); - } - else if (bounds.width < bounds.height) { - return new Bounds(bounds.left, bounds.top + (bounds.height - bounds.width) / 2, bounds.width, bounds.width); - } - return bounds; - }; - var getInputValue = function (node) { - var value = node.type === PASSWORD ? new Array(node.value.length + 1).join('\u2022') : node.value; - return value.length === 0 ? node.placeholder || '' : value; - }; - var CHECKBOX = 'checkbox'; - var RADIO = 'radio'; - var PASSWORD = 'password'; - var INPUT_COLOR = 0x2a2a2aff; - var InputElementContainer = /** @class */ (function (_super) { - __extends(InputElementContainer, _super); - function InputElementContainer(context, input) { - var _this = _super.call(this, context, input) || this; - _this.type = input.type.toLowerCase(); - _this.checked = input.checked; - _this.value = getInputValue(input); - if (_this.type === CHECKBOX || _this.type === RADIO) { - _this.styles.backgroundColor = 0xdededeff; - _this.styles.borderTopColor = - _this.styles.borderRightColor = - _this.styles.borderBottomColor = - _this.styles.borderLeftColor = - 0xa5a5a5ff; - _this.styles.borderTopWidth = - _this.styles.borderRightWidth = - _this.styles.borderBottomWidth = - _this.styles.borderLeftWidth = - 1; - _this.styles.borderTopStyle = - _this.styles.borderRightStyle = - _this.styles.borderBottomStyle = - _this.styles.borderLeftStyle = - 1 /* SOLID */; - _this.styles.backgroundClip = [0 /* BORDER_BOX */]; - _this.styles.backgroundOrigin = [0 /* BORDER_BOX */]; - _this.bounds = reformatInputBounds(_this.bounds); - } - switch (_this.type) { - case CHECKBOX: - _this.styles.borderTopRightRadius = - _this.styles.borderTopLeftRadius = - _this.styles.borderBottomRightRadius = - _this.styles.borderBottomLeftRadius = - CHECKBOX_BORDER_RADIUS; - break; - case RADIO: - _this.styles.borderTopRightRadius = - _this.styles.borderTopLeftRadius = - _this.styles.borderBottomRightRadius = - _this.styles.borderBottomLeftRadius = - RADIO_BORDER_RADIUS; - 
break; - } - return _this; - } - return InputElementContainer; - }(ElementContainer)); - - var SelectElementContainer = /** @class */ (function (_super) { - __extends(SelectElementContainer, _super); - function SelectElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - var option = element.options[element.selectedIndex || 0]; - _this.value = option ? option.text || '' : ''; - return _this; - } - return SelectElementContainer; - }(ElementContainer)); - - var TextareaElementContainer = /** @class */ (function (_super) { - __extends(TextareaElementContainer, _super); - function TextareaElementContainer(context, element) { - var _this = _super.call(this, context, element) || this; - _this.value = element.value; - return _this; - } - return TextareaElementContainer; - }(ElementContainer)); - - var IFrameElementContainer = /** @class */ (function (_super) { - __extends(IFrameElementContainer, _super); - function IFrameElementContainer(context, iframe) { - var _this = _super.call(this, context, iframe) || this; - _this.src = iframe.src; - _this.width = parseInt(iframe.width, 10) || 0; - _this.height = parseInt(iframe.height, 10) || 0; - _this.backgroundColor = _this.styles.backgroundColor; - try { - if (iframe.contentWindow && - iframe.contentWindow.document && - iframe.contentWindow.document.documentElement) { - _this.tree = parseTree(context, iframe.contentWindow.document.documentElement); - // http://www.w3.org/TR/css3-background/#special-backgrounds - var documentBackgroundColor = iframe.contentWindow.document.documentElement - ? parseColor(context, getComputedStyle(iframe.contentWindow.document.documentElement).backgroundColor) - : COLORS.TRANSPARENT; - var bodyBackgroundColor = iframe.contentWindow.document.body - ? parseColor(context, getComputedStyle(iframe.contentWindow.document.body).backgroundColor) - : COLORS.TRANSPARENT; - _this.backgroundColor = isTransparent(documentBackgroundColor) - ? 
isTransparent(bodyBackgroundColor) - ? _this.styles.backgroundColor - : bodyBackgroundColor - : documentBackgroundColor; - } - } - catch (e) { } - return _this; - } - return IFrameElementContainer; - }(ElementContainer)); - - var LIST_OWNERS = ['OL', 'UL', 'MENU']; - var parseNodeTree = function (context, node, parent, root) { - for (var childNode = node.firstChild, nextNode = void 0; childNode; childNode = nextNode) { - nextNode = childNode.nextSibling; - if (isTextNode(childNode) && childNode.data.trim().length > 0) { - parent.textNodes.push(new TextContainer(context, childNode, parent.styles)); - } - else if (isElementNode(childNode)) { - if (isSlotElement(childNode) && childNode.assignedNodes) { - childNode.assignedNodes().forEach(function (childNode) { return parseNodeTree(context, childNode, parent, root); }); - } - else { - var container = createContainer(context, childNode); - if (container.styles.isVisible()) { - if (createsRealStackingContext(childNode, container, root)) { - container.flags |= 4 /* CREATES_REAL_STACKING_CONTEXT */; - } - else if (createsStackingContext(container.styles)) { - container.flags |= 2 /* CREATES_STACKING_CONTEXT */; - } - if (LIST_OWNERS.indexOf(childNode.tagName) !== -1) { - container.flags |= 8 /* IS_LIST_OWNER */; - } - parent.elements.push(container); - childNode.slot; - if (childNode.shadowRoot) { - parseNodeTree(context, childNode.shadowRoot, container, root); - } - else if (!isTextareaElement(childNode) && - !isSVGElement(childNode) && - !isSelectElement(childNode)) { - parseNodeTree(context, childNode, container, root); - } - } - } - } - } - }; - var createContainer = function (context, element) { - if (isImageElement(element)) { - return new ImageElementContainer(context, element); - } - if (isCanvasElement(element)) { - return new CanvasElementContainer(context, element); - } - if (isSVGElement(element)) { - return new SVGElementContainer(context, element); - } - if (isLIElement(element)) { - return new 
LIElementContainer(context, element); - } - if (isOLElement(element)) { - return new OLElementContainer(context, element); - } - if (isInputElement(element)) { - return new InputElementContainer(context, element); - } - if (isSelectElement(element)) { - return new SelectElementContainer(context, element); - } - if (isTextareaElement(element)) { - return new TextareaElementContainer(context, element); - } - if (isIFrameElement(element)) { - return new IFrameElementContainer(context, element); - } - return new ElementContainer(context, element); - }; - var parseTree = function (context, element) { - var container = createContainer(context, element); - container.flags |= 4 /* CREATES_REAL_STACKING_CONTEXT */; - parseNodeTree(context, element, container, container); - return container; - }; - var createsRealStackingContext = function (node, container, root) { - return (container.styles.isPositionedWithZIndex() || - container.styles.opacity < 1 || - container.styles.isTransformed() || - (isBodyElement(node) && root.styles.isTransparent())); - }; - var createsStackingContext = function (styles) { return styles.isPositioned() || styles.isFloating(); }; - var isTextNode = function (node) { return node.nodeType === Node.TEXT_NODE; }; - var isElementNode = function (node) { return node.nodeType === Node.ELEMENT_NODE; }; - var isHTMLElementNode = function (node) { - return isElementNode(node) && typeof node.style !== 'undefined' && !isSVGElementNode(node); - }; - var isSVGElementNode = function (element) { - return typeof element.className === 'object'; - }; - var isLIElement = function (node) { return node.tagName === 'LI'; }; - var isOLElement = function (node) { return node.tagName === 'OL'; }; - var isInputElement = function (node) { return node.tagName === 'INPUT'; }; - var isHTMLElement = function (node) { return node.tagName === 'HTML'; }; - var isSVGElement = function (node) { return node.tagName === 'svg'; }; - var isBodyElement = function (node) { return 
node.tagName === 'BODY'; }; - var isCanvasElement = function (node) { return node.tagName === 'CANVAS'; }; - var isVideoElement = function (node) { return node.tagName === 'VIDEO'; }; - var isImageElement = function (node) { return node.tagName === 'IMG'; }; - var isIFrameElement = function (node) { return node.tagName === 'IFRAME'; }; - var isStyleElement = function (node) { return node.tagName === 'STYLE'; }; - var isScriptElement = function (node) { return node.tagName === 'SCRIPT'; }; - var isTextareaElement = function (node) { return node.tagName === 'TEXTAREA'; }; - var isSelectElement = function (node) { return node.tagName === 'SELECT'; }; - var isSlotElement = function (node) { return node.tagName === 'SLOT'; }; - // https://html.spec.whatwg.org/multipage/custom-elements.html#valid-custom-element-name - var isCustomElement = function (node) { return node.tagName.indexOf('-') > 0; }; - - var CounterState = /** @class */ (function () { - function CounterState() { - this.counters = {}; - } - CounterState.prototype.getCounterValue = function (name) { - var counter = this.counters[name]; - if (counter && counter.length) { - return counter[counter.length - 1]; - } - return 1; - }; - CounterState.prototype.getCounterValues = function (name) { - var counter = this.counters[name]; - return counter ? 
counter : []; - }; - CounterState.prototype.pop = function (counters) { - var _this = this; - counters.forEach(function (counter) { return _this.counters[counter].pop(); }); - }; - CounterState.prototype.parse = function (style) { - var _this = this; - var counterIncrement = style.counterIncrement; - var counterReset = style.counterReset; - var canReset = true; - if (counterIncrement !== null) { - counterIncrement.forEach(function (entry) { - var counter = _this.counters[entry.counter]; - if (counter && entry.increment !== 0) { - canReset = false; - if (!counter.length) { - counter.push(1); - } - counter[Math.max(0, counter.length - 1)] += entry.increment; - } - }); - } - var counterNames = []; - if (canReset) { - counterReset.forEach(function (entry) { - var counter = _this.counters[entry.counter]; - counterNames.push(entry.counter); - if (!counter) { - counter = _this.counters[entry.counter] = []; - } - counter.push(entry.reset); - }); - } - return counterNames; - }; - return CounterState; - }()); - var ROMAN_UPPER = { - integers: [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1], - values: ['M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I'] - }; - var ARMENIAN = { - integers: [ - 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 90, 80, 70, - 60, 50, 40, 30, 20, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - ], - values: [ - 'Ք', - 'Փ', - 'Ւ', - 'Ց', - 'Ր', - 'Տ', - 'Վ', - 'Ս', - 'Ռ', - 'Ջ', - 'Պ', - 'Չ', - 'Ո', - 'Շ', - 'Ն', - 'Յ', - 'Մ', - 'Ճ', - 'Ղ', - 'Ձ', - 'Հ', - 'Կ', - 'Ծ', - 'Խ', - 'Լ', - 'Ի', - 'Ժ', - 'Թ', - 'Ը', - 'Է', - 'Զ', - 'Ե', - 'Դ', - 'Գ', - 'Բ', - 'Ա' - ] - }; - var HEBREW = { - integers: [ - 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 400, 300, 200, 100, 90, 80, 70, 60, 50, 40, 30, 20, - 19, 18, 17, 16, 15, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - ], - values: [ - 'י׳', - 'ט׳', - 'ח׳', - 'ז׳', - 'ו׳', - 'ה׳', - 'ד׳', - 'ג׳', - 'ב׳', - 'א׳', - 'ת', - 'ש', - 'ר', - 'ק', - 'צ', - 
'פ', - 'ע', - 'ס', - 'נ', - 'מ', - 'ל', - 'כ', - 'יט', - 'יח', - 'יז', - 'טז', - 'טו', - 'י', - 'ט', - 'ח', - 'ז', - 'ו', - 'ה', - 'ד', - 'ג', - 'ב', - 'א' - ] - }; - var GEORGIAN = { - integers: [ - 10000, 9000, 8000, 7000, 6000, 5000, 4000, 3000, 2000, 1000, 900, 800, 700, 600, 500, 400, 300, 200, 100, 90, - 80, 70, 60, 50, 40, 30, 20, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1 - ], - values: [ - 'ჵ', - 'ჰ', - 'ჯ', - 'ჴ', - 'ხ', - 'ჭ', - 'წ', - 'ძ', - 'ც', - 'ჩ', - 'შ', - 'ყ', - 'ღ', - 'ქ', - 'ფ', - 'ჳ', - 'ტ', - 'ს', - 'რ', - 'ჟ', - 'პ', - 'ო', - 'ჲ', - 'ნ', - 'მ', - 'ლ', - 'კ', - 'ი', - 'თ', - 'ჱ', - 'ზ', - 'ვ', - 'ე', - 'დ', - 'გ', - 'ბ', - 'ა' - ] - }; - var createAdditiveCounter = function (value, min, max, symbols, fallback, suffix) { - if (value < min || value > max) { - return createCounterText(value, fallback, suffix.length > 0); - } - return (symbols.integers.reduce(function (string, integer, index) { - while (value >= integer) { - value -= integer; - string += symbols.values[index]; - } - return string; - }, '') + suffix); - }; - var createCounterStyleWithSymbolResolver = function (value, codePointRangeLength, isNumeric, resolver) { - var string = ''; - do { - if (!isNumeric) { - value--; - } - string = resolver(value) + string; - value /= codePointRangeLength; - } while (value * codePointRangeLength >= codePointRangeLength); - return string; - }; - var createCounterStyleFromRange = function (value, codePointRangeStart, codePointRangeEnd, isNumeric, suffix) { - var codePointRangeLength = codePointRangeEnd - codePointRangeStart + 1; - return ((value < 0 ? '-' : '') + - (createCounterStyleWithSymbolResolver(Math.abs(value), codePointRangeLength, isNumeric, function (codePoint) { - return fromCodePoint$1(Math.floor(codePoint % codePointRangeLength) + codePointRangeStart); - }) + - suffix)); - }; - var createCounterStyleFromSymbols = function (value, symbols, suffix) { - if (suffix === void 0) { suffix = '. 
'; } - var codePointRangeLength = symbols.length; - return (createCounterStyleWithSymbolResolver(Math.abs(value), codePointRangeLength, false, function (codePoint) { return symbols[Math.floor(codePoint % codePointRangeLength)]; }) + suffix); - }; - var CJK_ZEROS = 1 << 0; - var CJK_TEN_COEFFICIENTS = 1 << 1; - var CJK_TEN_HIGH_COEFFICIENTS = 1 << 2; - var CJK_HUNDRED_COEFFICIENTS = 1 << 3; - var createCJKCounter = function (value, numbers, multipliers, negativeSign, suffix, flags) { - if (value < -9999 || value > 9999) { - return createCounterText(value, 4 /* CJK_DECIMAL */, suffix.length > 0); - } - var tmp = Math.abs(value); - var string = suffix; - if (tmp === 0) { - return numbers[0] + string; - } - for (var digit = 0; tmp > 0 && digit <= 4; digit++) { - var coefficient = tmp % 10; - if (coefficient === 0 && contains(flags, CJK_ZEROS) && string !== '') { - string = numbers[coefficient] + string; - } - else if (coefficient > 1 || - (coefficient === 1 && digit === 0) || - (coefficient === 1 && digit === 1 && contains(flags, CJK_TEN_COEFFICIENTS)) || - (coefficient === 1 && digit === 1 && contains(flags, CJK_TEN_HIGH_COEFFICIENTS) && value > 100) || - (coefficient === 1 && digit > 1 && contains(flags, CJK_HUNDRED_COEFFICIENTS))) { - string = numbers[coefficient] + (digit > 0 ? multipliers[digit - 1] : '') + string; - } - else if (coefficient === 1 && digit > 0) { - string = multipliers[digit - 1] + string; - } - tmp = Math.floor(tmp / 10); - } - return (value < 0 ? negativeSign : '') + string; - }; - var CHINESE_INFORMAL_MULTIPLIERS = '十百千萬'; - var CHINESE_FORMAL_MULTIPLIERS = '拾佰仟萬'; - var JAPANESE_NEGATIVE = 'マイナス'; - var KOREAN_NEGATIVE = '마이너스'; - var createCounterText = function (value, type, appendSuffix) { - var defaultSuffix = appendSuffix ? '. ' : ''; - var cjkSuffix = appendSuffix ? '、' : ''; - var koreanSuffix = appendSuffix ? ', ' : ''; - var spaceSuffix = appendSuffix ? 
' ' : ''; - switch (type) { - case 0 /* DISC */: - return '•' + spaceSuffix; - case 1 /* CIRCLE */: - return '◦' + spaceSuffix; - case 2 /* SQUARE */: - return '◾' + spaceSuffix; - case 5 /* DECIMAL_LEADING_ZERO */: - var string = createCounterStyleFromRange(value, 48, 57, true, defaultSuffix); - return string.length < 4 ? "0" + string : string; - case 4 /* CJK_DECIMAL */: - return createCounterStyleFromSymbols(value, '〇一二三四五六七八九', cjkSuffix); - case 6 /* LOWER_ROMAN */: - return createAdditiveCounter(value, 1, 3999, ROMAN_UPPER, 3 /* DECIMAL */, defaultSuffix).toLowerCase(); - case 7 /* UPPER_ROMAN */: - return createAdditiveCounter(value, 1, 3999, ROMAN_UPPER, 3 /* DECIMAL */, defaultSuffix); - case 8 /* LOWER_GREEK */: - return createCounterStyleFromRange(value, 945, 969, false, defaultSuffix); - case 9 /* LOWER_ALPHA */: - return createCounterStyleFromRange(value, 97, 122, false, defaultSuffix); - case 10 /* UPPER_ALPHA */: - return createCounterStyleFromRange(value, 65, 90, false, defaultSuffix); - case 11 /* ARABIC_INDIC */: - return createCounterStyleFromRange(value, 1632, 1641, true, defaultSuffix); - case 12 /* ARMENIAN */: - case 49 /* UPPER_ARMENIAN */: - return createAdditiveCounter(value, 1, 9999, ARMENIAN, 3 /* DECIMAL */, defaultSuffix); - case 35 /* LOWER_ARMENIAN */: - return createAdditiveCounter(value, 1, 9999, ARMENIAN, 3 /* DECIMAL */, defaultSuffix).toLowerCase(); - case 13 /* BENGALI */: - return createCounterStyleFromRange(value, 2534, 2543, true, defaultSuffix); - case 14 /* CAMBODIAN */: - case 30 /* KHMER */: - return createCounterStyleFromRange(value, 6112, 6121, true, defaultSuffix); - case 15 /* CJK_EARTHLY_BRANCH */: - return createCounterStyleFromSymbols(value, '子丑寅卯辰巳午未申酉戌亥', cjkSuffix); - case 16 /* CJK_HEAVENLY_STEM */: - return createCounterStyleFromSymbols(value, '甲乙丙丁戊己庚辛壬癸', cjkSuffix); - case 17 /* CJK_IDEOGRAPHIC */: - case 48 /* TRAD_CHINESE_INFORMAL */: - return createCJKCounter(value, '零一二三四五六七八九', 
CHINESE_INFORMAL_MULTIPLIERS, '負', cjkSuffix, CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 47 /* TRAD_CHINESE_FORMAL */: - return createCJKCounter(value, '零壹貳參肆伍陸柒捌玖', CHINESE_FORMAL_MULTIPLIERS, '負', cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 42 /* SIMP_CHINESE_INFORMAL */: - return createCJKCounter(value, '零一二三四五六七八九', CHINESE_INFORMAL_MULTIPLIERS, '负', cjkSuffix, CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 41 /* SIMP_CHINESE_FORMAL */: - return createCJKCounter(value, '零壹贰叁肆伍陆柒捌玖', CHINESE_FORMAL_MULTIPLIERS, '负', cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS | CJK_HUNDRED_COEFFICIENTS); - case 26 /* JAPANESE_INFORMAL */: - return createCJKCounter(value, '〇一二三四五六七八九', '十百千万', JAPANESE_NEGATIVE, cjkSuffix, 0); - case 25 /* JAPANESE_FORMAL */: - return createCJKCounter(value, '零壱弐参四伍六七八九', '拾百千万', JAPANESE_NEGATIVE, cjkSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS); - case 31 /* KOREAN_HANGUL_FORMAL */: - return createCJKCounter(value, '영일이삼사오육칠팔구', '십백천만', KOREAN_NEGATIVE, koreanSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS); - case 33 /* KOREAN_HANJA_INFORMAL */: - return createCJKCounter(value, '零一二三四五六七八九', '十百千萬', KOREAN_NEGATIVE, koreanSuffix, 0); - case 32 /* KOREAN_HANJA_FORMAL */: - return createCJKCounter(value, '零壹貳參四五六七八九', '拾百千', KOREAN_NEGATIVE, koreanSuffix, CJK_ZEROS | CJK_TEN_COEFFICIENTS | CJK_TEN_HIGH_COEFFICIENTS); - case 18 /* DEVANAGARI */: - return createCounterStyleFromRange(value, 0x966, 0x96f, true, defaultSuffix); - case 20 /* GEORGIAN */: - return createAdditiveCounter(value, 1, 19999, GEORGIAN, 3 /* DECIMAL */, defaultSuffix); - case 21 /* GUJARATI */: - return createCounterStyleFromRange(value, 0xae6, 0xaef, true, defaultSuffix); - case 22 /* GURMUKHI */: - return createCounterStyleFromRange(value, 0xa66, 0xa6f, true, 
defaultSuffix); - case 22 /* HEBREW */: - return createAdditiveCounter(value, 1, 10999, HEBREW, 3 /* DECIMAL */, defaultSuffix); - case 23 /* HIRAGANA */: - return createCounterStyleFromSymbols(value, 'あいうえおかきくけこさしすせそたちつてとなにぬねのはひふへほまみむめもやゆよらりるれろわゐゑをん'); - case 24 /* HIRAGANA_IROHA */: - return createCounterStyleFromSymbols(value, 'いろはにほへとちりぬるをわかよたれそつねならむうゐのおくやまけふこえてあさきゆめみしゑひもせす'); - case 27 /* KANNADA */: - return createCounterStyleFromRange(value, 0xce6, 0xcef, true, defaultSuffix); - case 28 /* KATAKANA */: - return createCounterStyleFromSymbols(value, 'アイウエオカキクケコサシスセソタチツテトナニヌネノハヒフヘホマミムメモヤユヨラリルレロワヰヱヲン', cjkSuffix); - case 29 /* KATAKANA_IROHA */: - return createCounterStyleFromSymbols(value, 'イロハニホヘトチリヌルヲワカヨタレソツネナラムウヰノオクヤマケフコエテアサキユメミシヱヒモセス', cjkSuffix); - case 34 /* LAO */: - return createCounterStyleFromRange(value, 0xed0, 0xed9, true, defaultSuffix); - case 37 /* MONGOLIAN */: - return createCounterStyleFromRange(value, 0x1810, 0x1819, true, defaultSuffix); - case 38 /* MYANMAR */: - return createCounterStyleFromRange(value, 0x1040, 0x1049, true, defaultSuffix); - case 39 /* ORIYA */: - return createCounterStyleFromRange(value, 0xb66, 0xb6f, true, defaultSuffix); - case 40 /* PERSIAN */: - return createCounterStyleFromRange(value, 0x6f0, 0x6f9, true, defaultSuffix); - case 43 /* TAMIL */: - return createCounterStyleFromRange(value, 0xbe6, 0xbef, true, defaultSuffix); - case 44 /* TELUGU */: - return createCounterStyleFromRange(value, 0xc66, 0xc6f, true, defaultSuffix); - case 45 /* THAI */: - return createCounterStyleFromRange(value, 0xe50, 0xe59, true, defaultSuffix); - case 46 /* TIBETAN */: - return createCounterStyleFromRange(value, 0xf20, 0xf29, true, defaultSuffix); - case 3 /* DECIMAL */: - default: - return createCounterStyleFromRange(value, 48, 57, true, defaultSuffix); - } - }; - - var IGNORE_ATTRIBUTE = 'data-html2canvas-ignore'; - var DocumentCloner = /** @class */ (function () { - function DocumentCloner(context, element, options) { - this.context 
= context; - this.options = options; - this.scrolledElements = []; - this.referenceElement = element; - this.counters = new CounterState(); - this.quoteDepth = 0; - if (!element.ownerDocument) { - throw new Error('Cloned element does not have an owner document'); - } - this.documentElement = this.cloneNode(element.ownerDocument.documentElement, false); - } - DocumentCloner.prototype.toIFrame = function (ownerDocument, windowSize) { - var _this = this; - var iframe = createIFrameContainer(ownerDocument, windowSize); - if (!iframe.contentWindow) { - return Promise.reject("Unable to find iframe window"); - } - var scrollX = ownerDocument.defaultView.pageXOffset; - var scrollY = ownerDocument.defaultView.pageYOffset; - var cloneWindow = iframe.contentWindow; - var documentClone = cloneWindow.document; - /* Chrome doesn't detect relative background-images assigned in inline - - - -
- - - diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..9483b041ead490a7236db45de60cb90a9296a108 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.isort] +profile = "black" \ No newline at end of file diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..0882f018c76b576d3542bf223d0c55ecfa9e5b61 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,46 @@ +[metadata] +name = dalle-mini +version = attr: dalle_mini.__version__ +author = Boris Dayma et al. +author_email = boris.dayma@gmail.com +description = DALL·E mini - Generate images from a text prompt +long_description = file: README.md +long_description_content_type = text/markdown +url = https://github.com/borisdayma/dalle-mini +project_urls = + Bug Tracker = https://github.com/borisdayma/dalle-mini/issues +classifiers = + Programming Language :: Python :: 3 + License :: OSI Approved :: Apache Software License + Operating System :: OS Independent + Topic :: Scientific/Engineering :: Artificial Intelligence + Development Status :: 3 - Alpha + Intended Audience :: Developers + +[options] +package_dir = + =src +packages = find: +python_requires = >=3.6 +install_requires = + transformers + einops + unidecode + ftfy + emoji + pillow + jax + flax + wandb + +[options.extras_require] +dev = + tqdm + optax + braceexpand + datasets[streaming] + black[jupyter] + isort + +[options.packages.find] +where = src diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..7f1a1763ca9cebc7bc16576d353d3284ee5d3c7d --- /dev/null +++ b/setup.py @@ -0,0 +1,4 @@ +from setuptools import setup + +if __name__ == "__main__": + setup() diff --git a/src/dalle_mini/__init__.py b/src/dalle_mini/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3291c813f642674c3bfdaebdbc7a00d546f495a6 --- /dev/null +++ b/src/dalle_mini/__init__.py @@ -0,0 +1,3 @@ +__version__ = 
"0.0.4" + +from .model import DalleBart, DalleBartProcessor diff --git a/src/dalle_mini/data.py b/src/dalle_mini/data.py new file mode 100644 index 0000000000000000000000000000000000000000..1f58f4eb414787ef8f909779f140795db6b69bd7 --- /dev/null +++ b/src/dalle_mini/data.py @@ -0,0 +1,378 @@ +import random +from dataclasses import dataclass, field +from functools import partial + +import jax +import jax.numpy as jnp +import numpy as np +from braceexpand import braceexpand +from datasets import Dataset, load_dataset + +from .model.text import TextNormalizer + + +@dataclass +class Dataset: + dataset_repo_or_path: str + train_file: str = None + validation_file: str = None + streaming: bool = True + use_auth_token: bool = False + text_column: str = "caption" + encoding_column: str = "encoding" + max_train_samples: int = None + max_eval_samples: int = None + preprocessing_num_workers: int = None + overwrite_cache: bool = False + do_train: bool = False + do_eval: bool = True + seed_dataset: int = None + shard_by_host: bool = False + blank_caption_prob: float = 0.0 + clip_score_column: str = "clip_score" + min_clip_score: float = None + max_clip_score: float = None + filter_column: str = None + filter_value: str = None + train_dataset: Dataset = field(init=False) + eval_dataset: Dataset = field(init=False) + rng_dataset: jnp.ndarray = field(init=False) + multi_hosts: bool = field(init=False) + + def __post_init__(self): + if self.seed_dataset is None: + # create a random seed + self.seed_dataset = random.randint(0, 2**32 - 1) + self.multi_hosts = jax.process_count() > 1 + # feed blank captions only in streaming mode for now + # otherwise dataset could be cached with same blanked captions + if self.blank_caption_prob: + assert ( + self.streaming is True + ), "blank_caption_prob can only be used in streaming mode" + # define data_files + if self.train_file is not None or self.validation_file is not None: + # accept braceexpand notation + for k in ["train_file", 
"validation_file"]: + f = getattr(self, k) + if isinstance(f, str): + setattr(self, k, list(braceexpand(f))) + # for list of files, split training data shards by host + if ( + isinstance(self.train_file, list) + and self.multi_hosts + and self.shard_by_host + ): + self.train_file = self.train_file[ + jax.process_index() :: jax.process_count() + ] + data_files = { + "train": self.train_file, + "validation": self.validation_file, + } + else: + data_files = None + + # load dataset + dataset = load_dataset( + self.dataset_repo_or_path, + data_files=data_files, + streaming=self.streaming, + use_auth_token=self.use_auth_token, + ) + if self.do_train: + if "train" not in dataset: + raise ValueError("Training requires a training dataset") + self.train_dataset = dataset["train"] + if self.max_train_samples is not None: + self.train_dataset = ( + self.train_dataset.take(self.max_train_samples) + if self.streaming + else self.train_dataset.select(range(self.max_train_samples)) + ) + if self.do_eval: + if "validation" not in dataset: + raise ValueError("Evaluating requires a validation dataset") + self.eval_dataset = dataset["validation"] + if self.max_eval_samples is not None: + self.eval_dataset = ( + self.eval_dataset.take(self.max_eval_samples) + if self.streaming + else self.eval_dataset.select(range(self.max_eval_samples)) + ) + + def preprocess(self, tokenizer, config): + # get required config variables + decoder_start_token_id = config.decoder_start_token_id + normalize_text = config.normalize_text + max_length = config.max_text_length + + if self.streaming: + # we need to shuffle early in streaming mode + if hasattr(self, "train_dataset"): + self.train_dataset = self.train_dataset.shuffle( + buffer_size=5000, seed=self.seed_dataset + ) + else: + self.rng_dataset = jax.random.PRNGKey(self.seed_dataset) + + # filter data + partial_filter_function = partial( + filter_function, + filter_column=self.filter_column, + filter_value=self.filter_value, + 
clip_score_column=self.clip_score_column, + min_clip_score=self.min_clip_score, + max_clip_score=self.max_clip_score, + ) + for ds in ["train_dataset", "eval_dataset"]: + if hasattr(self, ds): + setattr( + self, + ds, + ( + getattr(self, ds).filter(partial_filter_function) + if self.streaming + else getattr(self, ds).filter( + partial_filter_function, + num_proc=self.preprocessing_num_workers, + load_from_cache_file=not self.overwrite_cache, + desc="Filtering datasets", + ) + ), + ) + + # normalize text + if normalize_text: + text_normalizer = TextNormalizer() + partial_normalize_function = partial( + normalize_function, + text_column=self.text_column, + text_normalizer=text_normalizer, + ) + for ds in ["train_dataset", "eval_dataset"]: + if hasattr(self, ds): + setattr( + self, + ds, + ( + getattr(self, ds).map(partial_normalize_function) + if self.streaming + else getattr(self, ds).map( + partial_normalize_function, + num_proc=self.preprocessing_num_workers, + load_from_cache_file=not self.overwrite_cache, + desc="Normalizing datasets", + ) + ), + ) + + # blank captions + if self.blank_caption_prob: + partial_blank_caption_function = partial( + blank_caption_function, + text_column=self.text_column, + blank_caption_prob=self.blank_caption_prob, + ) + if hasattr(self, "train_dataset"): + self.train_dataset = ( + self.train_dataset.map(partial_blank_caption_function) + if self.streaming + else self.train_dataset.map( + partial_blank_caption_function, + num_proc=self.preprocessing_num_workers, + load_from_cache_file=False, + desc="Blanking some captions", + ) + ) + + # preprocess + partial_preprocess_function = partial( + preprocess_function, + tokenizer=tokenizer, + text_column=self.text_column, + encoding_column=self.encoding_column, + max_length=max_length, + decoder_start_token_id=decoder_start_token_id, + ) + for ds in ["train_dataset", "eval_dataset"]: + if hasattr(self, ds): + setattr( + self, + ds, + ( + getattr(self, ds).map( + partial_preprocess_function, 
+ batched=True, + remove_columns=[ + self.text_column, + self.encoding_column, + ], + ) + if self.streaming + else getattr(self, ds).map( + partial_preprocess_function, + batched=True, + remove_columns=getattr(ds, "column_names"), + num_proc=self.preprocessing_num_workers, + load_from_cache_file=not self.overwrite_cache, + desc="Preprocessing datasets", + ) + ), + ) + + def dataloader(self, split, batch_size, epoch=None): + def _dataloader_datasets_non_streaming( + dataset: Dataset, + rng: jax.random.PRNGKey = None, + ): + """ + Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices. + Shuffle batches if rng is set. + """ + steps_per_epoch = len(dataset) // batch_size + + if rng is not None: + batch_idx = jax.random.permutation(rng, len(dataset)) + else: + batch_idx = jnp.arange(len(dataset)) + + batch_idx = batch_idx[ + : steps_per_epoch * batch_size + ] # Skip incomplete batch. + batch_idx = batch_idx.reshape((steps_per_epoch, batch_size)) + + for idx in batch_idx: + batch = dataset[idx] + batch = {k: jnp.array(v) for k, v in batch.items()} + yield batch + + def _dataloader_datasets_streaming( + dataset: Dataset, + epoch: int, + ): + keys = ["input_ids", "attention_mask", "labels", "decoder_input_ids"] + batch = {k: [] for k in keys} + first_loop = True # stop after one loop in some cases + while (self.multi_hosts and split == "train") or first_loop: + # in multi-host, we run forever (no epoch) as hosts need to stop + # at the same time and training data may not be split equally + # For validation data we put the entire batch on each host and then + # keep only the one specific to each host (could be improved but not necessary) + if epoch is not None: + assert split == "train" + # reshuffle training data at each epoch + dataset.set_epoch(epoch) + epoch += 1 + for item in dataset: + for k in keys: + batch[k].append(item[k]) + if len(batch[keys[0]]) == batch_size: + batch = {k: jnp.array(v) for k, v in batch.items()} + yield 
batch + batch = {k: [] for k in keys} + first_loop = False + + if split == "train": + ds = self.train_dataset + elif split == "eval": + ds = self.eval_dataset + else: + raise ValueError(f'split must be "train" or "eval", got {split}') + + if self.streaming: + return _dataloader_datasets_streaming(ds, epoch) + else: + if split == "train": + self.rng_dataset, input_rng = jax.random.split(self.rng_dataset) + return _dataloader_datasets_non_streaming(ds, input_rng) + + @property + def length(self): + len_train_dataset, len_eval_dataset = None, None + if self.streaming: + # we don't know the length, let's just assume max_samples if defined + if self.max_train_samples is not None: + len_train_dataset = self.max_train_samples + if self.max_eval_samples is not None: + len_eval_dataset = self.max_eval_samples + else: + len_train_dataset = ( + len(self.train_dataset) if hasattr(self, "train_dataset") else None + ) + len_eval_dataset = ( + len(self.eval_dataset) if hasattr(self, "eval_dataset") else None + ) + return len_train_dataset, len_eval_dataset + + +def shift_tokens_right(input_ids: np.array, decoder_start_token_id: int): + """ + Shift input ids one token to the right. 
+ """ + shifted_input_ids = np.zeros(input_ids.shape) + shifted_input_ids[:, 1:] = input_ids[:, :-1] + shifted_input_ids[:, 0] = decoder_start_token_id + return shifted_input_ids + + +def blank_caption_function(example, text_column, blank_caption_prob): + if blank_caption_prob and np.random.rand() < blank_caption_prob: + example[text_column] = "" + return example + + +def normalize_function(example, text_column, text_normalizer): + example[text_column] = text_normalizer(example[text_column]) + return example + + +def filter_function( + example, + min_clip_score, + max_clip_score, + clip_score_column, + filter_column, + filter_value, +): + if min_clip_score is not None and example[clip_score_column] < min_clip_score: + return False + if max_clip_score is not None and example[clip_score_column] > max_clip_score: + return False + if filter_column is not None and example[filter_column] != filter_value: + return False + return True + + +def preprocess_function( + examples, + tokenizer, + text_column, + encoding_column, + max_length, + decoder_start_token_id, +): + inputs = examples[text_column] + # Setting padding="max_length" as we need fixed length inputs for jitted functions + model_inputs = tokenizer( + inputs, + max_length=max_length, + padding="max_length", + truncation=True, + return_tensors="np", + ) + + # set up targets + # Note: labels correspond to our target indices + # decoder input ids are the same but shifted to the right with bos at the beginning (and without last token) + labels = examples[encoding_column] + labels = np.asarray(labels) + + # We need the labels, in addition to the decoder_input_ids, for the compute_loss function + model_inputs["labels"] = labels + + # In our case, this prepends the bos token and removes the last one + decoder_input_ids = shift_tokens_right(labels, decoder_start_token_id) + model_inputs["decoder_input_ids"] = decoder_input_ids + + return model_inputs diff --git a/src/dalle_mini/model/__init__.py 
b/src/dalle_mini/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6f6072e3d003379e502518b25ca174f1f1ccc4af --- /dev/null +++ b/src/dalle_mini/model/__init__.py @@ -0,0 +1,5 @@ +from .configuration import DalleBartConfig +from .modeling import DalleBart +from .partitions import set_partitions +from .processor import DalleBartProcessor +from .tokenizer import DalleBartTokenizer diff --git a/src/dalle_mini/model/configuration.py b/src/dalle_mini/model/configuration.py new file mode 100644 index 0000000000000000000000000000000000000000..37897279c36c5d6e521ef55fda4637539d01e885 --- /dev/null +++ b/src/dalle_mini/model/configuration.py @@ -0,0 +1,176 @@ +# coding=utf-8 +# Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+""" DalleBart model configuration """ +import warnings + +from transformers.configuration_utils import PretrainedConfig +from transformers.utils import logging + +from .utils import PretrainedFromWandbMixin + +logger = logging.get_logger(__name__) + + +class DalleBartConfig(PretrainedFromWandbMixin, PretrainedConfig): + model_type = "dallebart" + keys_to_ignore_at_inference = ["past_key_values"] + attribute_map = { + "num_attention_heads": "encoder_attention_heads", + "hidden_size": "d_model", + } + + def __init__( + self, + normalize_text=False, + encoder_vocab_size=50264, + image_vocab_size=16384, # encoded image token space + image_length=256, # number of encoded tokens + max_text_length=64, # max number of text tokens + encoder_layers=12, + encoder_ffn_dim=4096, + encoder_attention_heads=16, + decoder_layers=12, + decoder_ffn_dim=4096, + decoder_attention_heads=16, + activation_function="gelu", + d_model=1024, + dropout=0.1, + attention_dropout=0.0, + activation_dropout=0.0, + init_std=0.02, + scale_embedding=False, + gradient_checkpointing=False, + use_cache=True, + is_encoder_decoder=True, + forced_eos_token_id=None, + tie_word_embeddings=False, # different modalities and sizes + do_sample=True, + # transformer variants + use_bias=False, # use bias in attention and dense layers (except for lm_head) + ln_type="layernorm", # layer normalization type, "rmsnorm", "layernorm" + ln_positions="normformer", # layer normalization positions, "normformer", "swinv2", "cogview", "postln", "preln", "deepnet" (same as postln) + use_head_scale=False, # used in NormFormer + use_cosine_attention=False, # used in Swin v2 + tau_init=0.05, # used only in cosine attention (Swin v2) + use_absolute_position_embeddings=True, # default + use_swin_position_embeddings=False, # used in Swin v1/v2 + use_deepnet_scaling=False, # used in Deepnet + use_glu=False, # "GLU Variants Improve Transformer" + use_alibi=False, # Not implemented yet - from "Train Short, Test Long: Attention with 
Linear Biases Enables Input Length Extrapolation" + sinkhorn_iters=1, # used in SinkFormers + use_final_ln_encoder=True, # final layer normalization in encoder + use_final_ln_decoder=True, # final layer normalization in decoder + # parameters that should not be necessary but could affect results + force_ln_scale=False, # force scale in layernorm even when followed by dense layers + **kwargs, + ): + # text normalizer + self.normalize_text = normalize_text + + # transformer variants + self.use_bias = use_bias + assert ln_type in [ + "rmsnorm", + "layernorm", + ], "ln_type must be 'rmsnorm' or 'layernorm'" + self.ln_type = ln_type + if ln_positions == "deepnet": + ln_positions = "postln" + assert ln_positions in [ + "normformer", + "swinv2", + "cogview", + "postln", + "preln", + ], "ln_positions must be 'normformer', 'swinv2', 'cogview', 'postln', 'preln'" + self.use_head_scale = use_head_scale + assert use_alibi is False, "use_alibi is not supported yet" + self.ln_positions = ln_positions + self.use_cosine_attention = use_cosine_attention + self.tau_init = tau_init + self.use_absolute_position_embeddings = use_absolute_position_embeddings + self.use_swin_position_embeddings = use_swin_position_embeddings + self.use_deepnet_scaling = use_deepnet_scaling + self.use_glu = use_glu + self.use_alibi = use_alibi + self.sinkhorn_iters = sinkhorn_iters + if ln_positions == "postln": + assert ( + use_final_ln_encoder + ), "use_final_ln_encoder must be True when ln_positions is 'postln'" + assert ( + use_final_ln_decoder + ), "use_final_ln_decoder must be True when ln_positions is 'postln'" + self.use_final_ln_encoder = use_final_ln_encoder + self.use_final_ln_decoder = use_final_ln_decoder + self.force_ln_scale = force_ln_scale + + # common parameters + self.encoder_vocab_size = encoder_vocab_size + self.image_vocab_size = image_vocab_size + self.image_length = image_length + self.max_text_length = max_text_length + self.d_model = d_model + self.encoder_ffn_dim = 
encoder_ffn_dim + self.encoder_layers = encoder_layers + self.encoder_attention_heads = encoder_attention_heads + self.decoder_ffn_dim = decoder_ffn_dim + self.decoder_layers = decoder_layers + self.decoder_attention_heads = decoder_attention_heads + self.dropout = dropout + self.attention_dropout = attention_dropout + self.activation_dropout = activation_dropout + self.activation_function = activation_function + self.init_std = init_std + self.use_cache = use_cache + self.gradient_checkpointing = gradient_checkpointing + self.scale_embedding = ( + scale_embedding # scale factor will be sqrt(d_model) if True + ) + + # special token id's are appended to vocab if not provided + decoder_start_token_id = kwargs.pop("decoder_start_token_id", image_vocab_size) + bos_token_id = kwargs.pop("bos_token_id", image_vocab_size) + pad_token_id = kwargs.pop("pad_token_id", image_vocab_size) + eos_token_id = kwargs.pop("eos_token_id", image_vocab_size) + + # we generate to image_length + 1 (for bos) by default + min_length = kwargs.pop("min_length", image_length + 1) + max_length = kwargs.pop("max_length", image_length + 1) + + super().__init__( + # args required in parent class + is_encoder_decoder=is_encoder_decoder, + tie_word_embeddings=tie_word_embeddings, + forced_eos_token_id=forced_eos_token_id, + decoder_start_token_id=decoder_start_token_id, + bos_token_id=bos_token_id, + pad_token_id=pad_token_id, + eos_token_id=eos_token_id, + min_length=min_length, + max_length=max_length, + do_sample=do_sample, + **kwargs, + ) + + # ensure backward compatibility for BART CNN models + if self.forced_bos_token_id is None and kwargs.get( + "force_bos_token_to_be_generated", False + ): + self.forced_bos_token_id = self.bos_token_id + warnings.warn( + f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions." + "The config can simply be saved and uploaded again to be fixed." 
+ ) diff --git a/src/dalle_mini/model/modeling.py b/src/dalle_mini/model/modeling.py new file mode 100644 index 0000000000000000000000000000000000000000..1fd2db4e30b6084e818b9ed2c5f1af4c7a5beba8 --- /dev/null +++ b/src/dalle_mini/model/modeling.py @@ -0,0 +1,2093 @@ +# coding=utf-8 +# Copyright 2021-2022 The Fairseq Authors and The Google Flax Team Authors And The HuggingFace Inc. team and & DALL·E Mini team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" DalleBart model. 
""" + +import math +import os +from functools import partial +from pickle import UnpicklingError +from typing import Any, Dict, Optional, Tuple, Union + +import flax +import flax.linen as nn +import jax +import jax.numpy as jnp +import msgpack.exceptions +from einops import rearrange +from flax.core.frozen_dict import unfreeze +from flax.linen import combine_masks, make_causal_mask +from flax.linen import partitioning as nn_partitioning +from flax.linen.linear import PrecisionLike +from flax.serialization import from_bytes +from flax.traverse_util import flatten_dict, unflatten_dict +from jax import custom_jvp, lax +from jax.random import PRNGKey +from transformers.configuration_utils import PretrainedConfig +from transformers.file_utils import ( + FLAX_WEIGHTS_NAME, + WEIGHTS_NAME, + cached_path, + hf_bucket_url, + is_offline_mode, + is_remote_url, +) +from transformers.generation_flax_utils import FlaxSampleOutput +from transformers.modeling_flax_outputs import ( + FlaxBaseModelOutput, + FlaxBaseModelOutputWithPastAndCrossAttentions, + FlaxCausalLMOutputWithCrossAttentions, + FlaxSeq2SeqLMOutput, +) +from transformers.modeling_flax_utils import ACT2FN +from transformers.models.bart.modeling_flax_bart import ( + FlaxBartAttention, + FlaxBartForConditionalGeneration, + FlaxBartForConditionalGenerationModule, + FlaxBartModule, + FlaxBartPreTrainedModel, +) +from transformers.utils import logging + +from .configuration import DalleBartConfig +from .utils import PretrainedFromWandbMixin + +logger = logging.get_logger(__name__) + +remat = nn_partitioning.remat + + +def smelu(beta: Any = 1.0): + """ + Implementation of "Real World Large Scale Recommendation Systems Reproducibility and Smooth Activations" + https://arxiv.org/abs/2202.06499 + """ + + @custom_jvp + @jax.jit + def _smelu(x: Any) -> Any: + x = jnp.where(x <= -beta, 0.0, x) + return jnp.where(x >= beta, x, jnp.square(x + beta) / (4 * beta)) + + _smelu.defjvps( + lambda g, ans, x: lax.select( + x == -beta, + 
lax.full_like(g, 0), + lax.select(x == beta, lax.full_like(g, 1), g), + ) + ) + return _smelu + + +ACT2FN.update({"smelu": smelu}) + +# deepnet initialization +def deepnet_init(gain=1): + init = jax.nn.initializers.glorot_normal() + + def _init(*args, **kwargs): + return gain * init(*args, **kwargs) + + return _init + + +# deepnet gain +deepnet_gain = { + "encoder": { + "alpha": lambda config: 0.81 + * (config.encoder_layers**4 * config.decoder_layers) ** 0.0625, + "beta": lambda config: 0.87 + * (config.encoder_layers**4 * config.decoder_layers) ** -0.0625, + }, + "decoder": { + "alpha": lambda config: (3 * config.decoder_layers) ** 0.25, + "beta": lambda config: (12 * config.decoder_layers) ** -0.25, + }, +} + + +class RMSNorm(nn.Module): + """ + From "Root Mean Square Layer Normalization" by https://arxiv.org/abs/1910.07467 + + Adapted from flax.linen.LayerNorm + """ + + epsilon: float = 1e-6 + dtype: Any = jnp.float32 + param_dtype: Any = jnp.float32 + use_scale: bool = True + scale_init: Any = jax.nn.initializers.ones + + @nn.compact + def __call__(self, x): + reduction_axes = (-1,) + feature_axes = (-1,) + + rms_sq = self._compute_rms_sq(x, reduction_axes) + + return self._normalize( + self, + x, + rms_sq, + reduction_axes, + feature_axes, + self.dtype, + self.param_dtype, + self.epsilon, + self.use_scale, + self.scale_init, + ) + + def _compute_rms_sq(self, x, axes): + x = jnp.asarray(x, jnp.promote_types(jnp.float32, jnp.result_type(x))) + rms_sq = jnp.mean(jax.lax.square(x), axes) + return rms_sq + + def _normalize( + self, + mdl, + x, + rms_sq, + reduction_axes, + feature_axes, + dtype, + param_dtype, + epsilon, + use_scale, + scale_init, + ): + reduction_axes = nn.normalization._canonicalize_axes(x.ndim, reduction_axes) + feature_axes = nn.normalization._canonicalize_axes(x.ndim, feature_axes) + stats_shape = list(x.shape) + for axis in reduction_axes: + stats_shape[axis] = 1 + rms_sq = rms_sq.reshape(stats_shape) + feature_shape = [1] * x.ndim + 
def norm(type, *args, **kwargs):
    """Instantiate the configured normalization layer ("rmsnorm" or "layernorm")."""
    if type == "rmsnorm":
        return RMSNorm(*args, **kwargs)
    if type == "layernorm":
        return nn.LayerNorm(*args, **kwargs)
    raise ValueError(f"Unknown norm type {type}")


def dot_product_attention_weights(
    query: Any,
    key: Any,
    bias: Optional[Any] = None,
    mask: Optional[Any] = None,
    embed_pos: Optional[Any] = None,
    broadcast_dropout: bool = True,
    dropout_rng: Optional[PRNGKey] = None,
    dropout_rate: float = 0.0,
    deterministic: bool = False,
    dtype: Any = jnp.float32,
    precision: PrecisionLike = None,
    sinkhorn_iters: int = 1,
    is_encoder: bool = False,
):
    """
    Compute attention weights from queries and keys.

    Any attention mask is expected to be folded into `bias`; the raw boolean
    `mask` is only consulted again on the Sinkhorn path.

    Adapted from flax.linen.attention.dot_product_attention_weights.
    """
    assert query.ndim == key.ndim, "q, k must have same rank."
    assert query.shape[:-3] == key.shape[:-3], "q, k batch dims must match."
    assert query.shape[-2] == key.shape[-2], "q, k num_heads must match."
    assert query.shape[-1] == key.shape[-1], "q, k depths must match."

    # scaled dot product; result shape is (batch..., num_heads, q_length, kv_length)
    head_depth = query.shape[-1]
    scaled_query = query / jnp.sqrt(head_depth).astype(dtype)
    logits = jnp.einsum(
        "...qhd,...khd->...hqk", scaled_query, key, precision=precision
    )

    # additive bias carries masking, proximity bias, etc.
    if bias is not None:
        logits = logits + bias

    # additive relative-position term (Swin-style)
    if embed_pos is not None:
        logits = logits + embed_pos

    if not is_encoder or sinkhorn_iters == 1:
        # plain softmax; sinkhorn does not work for causal attention
        # (it leaks info of future tokens into past)
        weights = jax.nn.softmax(logits).astype(dtype)
    else:
        # Sinkhorn normalization, alternating rows / columns
        # (adapted from https://github.com/lucidrains/sinkhorn-transformer)
        for step in range(sinkhorn_iters):
            # masked positions already carry -inf from the bias
            axis = -1 if step % 2 == 0 else -2
            logits -= jax.nn.logsumexp(logits, axis=axis, keepdims=True)
        if mask is not None:
            logits = jnp.where(mask, logits, -jnp.inf)
        weights = jnp.exp(logits).astype(dtype)

    # attention dropout
    if not deterministic and dropout_rate > 0.0:
        keep_prob = 1.0 - dropout_rate
        if broadcast_dropout:
            # share the dropout pattern across batch + head dimensions
            drop_shape = tuple([1] * (key.ndim - 2)) + weights.shape[-2:]
            keep = jax.random.bernoulli(dropout_rng, keep_prob, drop_shape)
        else:
            keep = jax.random.bernoulli(dropout_rng, keep_prob, weights.shape)
        scaling = keep.astype(weights.dtype) / jnp.asarray(keep_prob, dtype=dtype)
        weights = weights * scaling

    return weights
class FlaxBartAttention(FlaxBartAttention):
    """
    Multi-head attention, subclassing transformers' FlaxBartAttention.

    Edits:
    - causal mask is used only in decoder and considers image_length
    - scale attention heads per NormFormer paper
    """

    is_encoder: bool = False
    # static query/key lengths, used only for swin relative position embeddings
    q_length: int = None
    k_length: int = None

    def setup(self) -> None:
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {self.num_heads})."
            )

        dense = partial(
            nn.Dense,
            self.embed_dim,
            use_bias=self.bias,
            dtype=self.dtype,
        )

        # DeepNet "beta" gain: shrinks the init of value/output projections
        gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
            self.config
        )

        # q/k projections use unscaled init; v/out use the DeepNet gain
        self.q_proj = dense(
            kernel_init=deepnet_init()
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std)
        )
        self.k_proj = dense(
            kernel_init=deepnet_init()
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std)
        )
        self.v_proj = dense(
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std)
        )
        self.out_proj = dense(
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std)
        )
        self.dropout_layer = nn.Dropout(rate=self.dropout)

        # per-head output scaling (NormFormer)
        if self.config.use_head_scale:
            self.head_scale = self.param(
                "head_scale", jax.nn.initializers.ones, (1, 1, self.num_heads, 1)
            )

        # learned temperature for cosine attention (Swin v2)
        if self.config.use_cosine_attention:
            self.tau = self.param(
                "tau",
                jax.nn.initializers.constant(self.config.tau_init),
                (1, self.num_heads, 1, 1),
            )

        # learned relative position bias table (Swin v1/v2)
        if self.config.use_swin_position_embeddings:
            self.rel_bias = nn.Embed(
                self.q_length,
                self.k_length * self.num_heads,
                embedding_init=deepnet_init()
                if self.config.use_deepnet_scaling
                else jax.nn.initializers.normal(self.config.init_std),
            )

        if self.causal:
            # used only in decoder
            self.causal_mask = make_causal_mask(
                jnp.ones((1, self.config.image_length), dtype="bool"), dtype="bool"
            )

    def __call__(
        self,
        hidden_states: jnp.ndarray,
        key_value_states: Optional[jnp.ndarray] = None,
        attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        batch_size = hidden_states.shape[0]

        # get query proj
        query_states = self.q_proj(hidden_states)
        # get key, value proj
        if is_cross_attention:
            # cross_attentions
            key_states = self.k_proj(key_value_states)
            value_states = self.v_proj(key_value_states)
        else:
            # self_attention
            key_states = self.k_proj(hidden_states)
            value_states = self.v_proj(hidden_states)

        query_states = self._split_heads(query_states)
        key_states = self._split_heads(key_states)
        value_states = self._split_heads(value_states)

        # handle cache prepare causal attention mask
        if self.causal:
            query_length, key_length = query_states.shape[1], key_states.shape[1]
            if self.has_variable("cache", "cached_key"):
                # incremental decoding: slice the causal mask at the current position
                mask_shift = self.variables["cache"]["cache_index"]
                max_decoder_length = self.variables["cache"]["cached_key"].shape[1]
                causal_mask = lax.dynamic_slice(
                    self.causal_mask,
                    (0, 0, mask_shift, 0),
                    (1, 1, query_length, max_decoder_length),
                )
            else:
                causal_mask = self.causal_mask[:, :, :query_length, :key_length]
            causal_mask = jnp.broadcast_to(
                causal_mask, (batch_size,) + causal_mask.shape[1:]
            )

        # combine masks if needed
        if attention_mask is not None and self.causal:
            attention_mask = jnp.broadcast_to(
                jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape
            )
            attention_mask = combine_masks(attention_mask, causal_mask)
        elif self.causal:
            attention_mask = causal_mask
        elif attention_mask is not None:
            attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))

        # During fast autoregressive decoding, we feed one position at a time,
        # and cache the keys and values step by step.
        if self.causal and (self.has_variable("cache", "cached_key") or init_cache):
            key_states, value_states, attention_mask = self._concatenate_to_cache(
                key_states, value_states, query_states, attention_mask
            )

        # Convert the boolean attention mask to an attention bias.
        if attention_mask is not None:
            # attention mask in the form of attention bias
            attention_bias = lax.select(
                attention_mask > 0,
                jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
                jnp.full(attention_mask.shape, -jnp.inf).astype(self.dtype),
            )
        else:
            attention_bias = None

        dropout_rng = None
        if not deterministic and self.dropout > 0.0:
            dropout_rng = self.make_rng("dropout")

        if self.config.use_cosine_attention:
            # normalize q and k (epsilon guards against zero-norm vectors)
            query_states = query_states / (
                jnp.linalg.norm(query_states, axis=-1, keepdims=True) + 1e-8
            )
            key_states = key_states / (
                jnp.linalg.norm(key_states, axis=-1, keepdims=True) + 1e-8
            )

        # relative position embeddings
        if self.config.use_swin_position_embeddings:
            position_ids = jnp.arange(self.q_length)
            embed_pos = self.rel_bias(position_ids)
            embed_pos = rearrange(embed_pos, "q (k h) -> 1 h q k", h=self.num_heads)
        else:
            embed_pos = None

        attn_weights = dot_product_attention_weights(
            query_states,
            key_states,
            bias=attention_bias,
            mask=attention_mask,
            embed_pos=embed_pos,
            dropout_rng=dropout_rng,
            dropout_rate=self.dropout,
            broadcast_dropout=True,
            deterministic=deterministic,
            dtype=self.dtype,
            precision=None,
            sinkhorn_iters=self.config.sinkhorn_iters,
            is_encoder=self.is_encoder,
        )
        if self.config.use_cosine_attention:
            # divide by tau (clamped to avoid division blow-up)
            attn_weights = attn_weights / jnp.maximum(self.tau, 0.01)

        attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states)
        if self.config.use_head_scale:
            # per Normformer
            attn_output = attn_output * self.head_scale
        attn_output = self._merge_heads(attn_output)
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights


class GLU(nn.Module):
    """From "GLU Variants Improve Transformer" by https://arxiv.org/abs/2002.05202"""

    config: DalleBartConfig
    ffn_dim: int
    embed_dim: int
    dtype: jnp.dtype = jnp.float32
    is_encoder: bool = False

    @nn.compact
    def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
        # Gated feed-forward: x -> act(W x) * (V x) -> dropout -> out projection,
        # with layer norms inserted according to config.ln_positions.

        gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
            self.config
        )

        # pre-FFN layer norm (pre-LN style variants)
        if self.config.ln_positions in ["normformer", "cogview", "preln"]:
            x = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=self.config.force_ln_scale,
            )(x)
        w = nn.Dense(
            self.ffn_dim,
            dtype=self.dtype,
            use_bias=self.config.use_bias,
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std),
        )(x)
        w = ACT2FN[self.config.activation_function](w)
        v = nn.Dense(
            self.ffn_dim,
            dtype=self.dtype,
            use_bias=self.config.use_bias,
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std),
        )(x)
        x = w * v
        # NormFormer adds a mid-FFN layer norm after the gate
        if self.config.ln_positions in ["normformer"]:
            x = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=self.config.force_ln_scale,
            )(x)
        x = nn.Dropout(rate=self.config.activation_dropout)(
            x, deterministic=deterministic
        )

        x = nn.Dense(
            self.embed_dim,
            dtype=self.dtype,
            use_bias=self.config.use_bias,
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std),
        )(x)
        # Swin v2 / CogView place a layer norm after the output projection
        if self.config.ln_positions in ["swinv2", "cogview"]:
            x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
        x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
        return x
class FFN(nn.Module):
    """Simple FFN layer"""

    config: DalleBartConfig
    ffn_dim: int
    embed_dim: int
    dtype: jnp.dtype = jnp.float32
    is_encoder: bool = False

    @nn.compact
    def __call__(self, x: jnp.ndarray, deterministic: bool = True) -> jnp.ndarray:
        # Standard 2-layer feed-forward block with layer norms inserted
        # according to config.ln_positions; mirrors GLU minus the gating.

        gain = deepnet_gain["encoder" if self.is_encoder else "decoder"]["beta"](
            self.config
        )
        # pre-FFN layer norm (pre-LN style variants)
        if self.config.ln_positions in ["normformer", "cogview", "preln"]:
            x = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=self.config.force_ln_scale,
            )(x)
        x = nn.Dense(
            self.ffn_dim,
            dtype=self.dtype,
            use_bias=self.config.use_bias,
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std),
        )(x)
        x = ACT2FN[self.config.activation_function](x)
        # NormFormer adds a mid-FFN layer norm after the activation
        if self.config.ln_positions in ["normformer"]:
            x = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=self.config.force_ln_scale,
            )(x)
        x = nn.Dropout(rate=self.config.activation_dropout)(
            x, deterministic=deterministic
        )
        x = nn.Dense(
            self.embed_dim,
            dtype=self.dtype,
            use_bias=self.config.use_bias,
            kernel_init=deepnet_init(gain)
            if self.config.use_deepnet_scaling
            else jax.nn.initializers.normal(self.config.init_std),
        )(x)
        # Swin v2 / CogView place a layer norm after the output projection
        if self.config.ln_positions in ["swinv2", "cogview"]:
            x = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(x)
        x = nn.Dropout(rate=self.config.dropout)(x, deterministic=deterministic)
        return x


class FlaxBartEncoderLayer(nn.Module):
    """
    Single encoder layer: self-attention + feed-forward, each wrapped in a
    residual connection (optionally DeepNet-scaled).

    Edits:
    - no bias
    - use custom FlaxBartAttention
    """

    config: DalleBartConfig
    dtype: jnp.dtype = jnp.float32
    add_norm: bool = False  # append a final layer norm after this layer
    use_scale: bool = True  # whether that final layer norm learns a scale

    @nn.compact
    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:

        # DeepNet "alpha": scales the residual branch; 1 when disabled
        res_gain = (
            deepnet_gain["encoder"]["alpha"](self.config)
            if self.config.use_deepnet_scaling
            else 1
        )

        embed_dim = self.config.d_model
        residual = hidden_states
        if self.config.ln_positions in ["normformer", "cogview", "preln"]:
            hidden_states = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=self.config.force_ln_scale,
            )(hidden_states)
        hidden_states, attn_weights = FlaxBartAttention(
            config=self.config,
            embed_dim=embed_dim,
            num_heads=self.config.encoder_attention_heads,
            dropout=self.config.attention_dropout,
            bias=self.config.use_bias,
            dtype=self.dtype,
            is_encoder=True,
            q_length=self.config.max_text_length,
            k_length=self.config.max_text_length,
        )(hidden_states=hidden_states, attention_mask=attention_mask)

        # post-attention layer norm for these variants
        if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
            hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
                hidden_states
            )
        hidden_states = nn.Dropout(rate=self.config.dropout)(
            hidden_states, deterministic=deterministic
        )
        # residual connection, scaled by DeepNet alpha when enabled
        hidden_states = residual * res_gain + hidden_states
        if self.config.ln_positions in ["postln"]:
            hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
                hidden_states
            )

        residual = hidden_states
        ff_block = (
            GLU(
                config=self.config,
                ffn_dim=self.config.encoder_ffn_dim,
                embed_dim=embed_dim,
                dtype=self.dtype,
                is_encoder=True,
            )
            if self.config.use_glu
            else FFN(
                config=self.config,
                ffn_dim=self.config.encoder_ffn_dim,
                embed_dim=embed_dim,
                dtype=self.dtype,
                is_encoder=True,
            )
        )
        hidden_states = ff_block(hidden_states, deterministic=deterministic)
        hidden_states = residual * res_gain + hidden_states
        if self.add_norm or self.config.ln_positions in ["postln"]:
            use_scale = (
                self.use_scale
                or self.config.ln_positions == "postln"
                or self.config.force_ln_scale
            )
            hidden_states = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=use_scale,
            )(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights,)

        return outputs
class FlaxBartDecoderLayer(nn.Module):
    """
    Single decoder layer: causal self-attention, optional cross-attention
    over encoder outputs, then feed-forward — each in a residual connection
    (optionally DeepNet-scaled).

    Edits:
    - no bias
    - use custom FlaxBartAttention
    """

    config: DalleBartConfig
    dtype: jnp.dtype = jnp.float32
    add_norm: bool = False  # append a final layer norm after this layer
    use_scale: bool = False  # whether that final layer norm learns a scale

    @nn.compact
    def __call__(
        self,
        hidden_states: jnp.ndarray,
        attention_mask: jnp.ndarray,
        encoder_hidden_states: Optional[jnp.ndarray] = None,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        init_cache: bool = False,
        output_attentions: bool = True,
        deterministic: bool = True,
    ) -> Tuple[jnp.ndarray]:

        # DeepNet "alpha": scales the residual branch; 1 when disabled
        res_gain = (
            deepnet_gain["decoder"]["alpha"](self.config)
            if self.config.use_deepnet_scaling
            else 1
        )

        embed_dim = self.config.d_model
        residual = hidden_states

        # Self Attention
        if self.config.ln_positions in ["normformer", "cogview", "preln"]:
            hidden_states = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=self.config.force_ln_scale,
            )(hidden_states)
        hidden_states, attn_weights = FlaxBartAttention(
            config=self.config,
            embed_dim=embed_dim,
            num_heads=self.config.decoder_attention_heads,
            dropout=self.config.attention_dropout,
            causal=True,
            bias=self.config.use_bias,
            dtype=self.dtype,
            is_encoder=False,
            q_length=self.config.image_length,
            k_length=self.config.image_length,
        )(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            init_cache=init_cache,
        )

        if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
            hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
                hidden_states
            )
        hidden_states = nn.Dropout(rate=self.config.dropout)(
            hidden_states, deterministic=deterministic
        )
        hidden_states = residual * res_gain + hidden_states
        if self.config.ln_positions in ["postln"]:
            hidden_states = norm(self.config.ln_type, dtype=self.dtype, epsilon=1e-05)(
                hidden_states
            )

        # Cross Attention
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            if self.config.ln_positions in ["normformer", "cogview", "preln"]:
                hidden_states = norm(
                    self.config.ln_type,
                    dtype=self.dtype,
                    epsilon=1e-05,
                    use_scale=self.config.force_ln_scale,
                )(hidden_states)
            hidden_states, cross_attn_weights = FlaxBartAttention(
                config=self.config,
                embed_dim=embed_dim,
                num_heads=self.config.decoder_attention_heads,
                dropout=self.config.attention_dropout,
                bias=self.config.use_bias,
                dtype=self.dtype,
                is_encoder=False,
                q_length=self.config.image_length,
                k_length=self.config.max_text_length,
            )(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
            )
            if self.config.ln_positions in ["normformer", "swinv2", "cogview"]:
                hidden_states = norm(
                    self.config.ln_type, dtype=self.dtype, epsilon=1e-05
                )(hidden_states)
            hidden_states = nn.Dropout(rate=self.config.dropout)(
                hidden_states, deterministic=deterministic
            )
            hidden_states = residual * res_gain + hidden_states
            if self.config.ln_positions in ["postln"]:
                hidden_states = norm(
                    self.config.ln_type, dtype=self.dtype, epsilon=1e-05
                )(hidden_states)

        # Feed forward
        residual = hidden_states
        ff_block = (
            GLU(
                config=self.config,
                ffn_dim=self.config.decoder_ffn_dim,
                embed_dim=embed_dim,
                dtype=self.dtype,
                is_encoder=False,
            )
            if self.config.use_glu
            else FFN(
                config=self.config,
                ffn_dim=self.config.decoder_ffn_dim,
                embed_dim=embed_dim,
                dtype=self.dtype,
                is_encoder=False,
            )
        )
        hidden_states = ff_block(hidden_states, deterministic=deterministic)
        hidden_states = residual * res_gain + hidden_states
        if self.add_norm or self.config.ln_positions in ["postln"]:
            use_scale = (
                self.use_scale
                or self.config.ln_positions == "postln"
                or self.config.force_ln_scale
            )
            hidden_states = norm(
                self.config.ln_type,
                dtype=self.dtype,
                epsilon=1e-05,
                use_scale=use_scale,
            )(hidden_states)

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (attn_weights, cross_attn_weights)

        return outputs


class FlaxBartEncoderLayerCollection(nn.Module):
    config: DalleBartConfig
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    """
    Edits:
    - use custom FlaxBartEncoderLayer
    - allow Gradient Checkpointing (nn.remat)
    """

    @nn.compact
    def __call__(
        self,
        hidden_states,
        attention_mask,
        deterministic: bool = True,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None

        n_layers = self.config.encoder_layers
        # wrap the layer in remat (gradient checkpointing) when enabled;
        # static_argnums marks the two boolean flags as static
        layer = (
            remat(FlaxBartEncoderLayer, static_argnums=(2, 3))
            if self.config.gradient_checkpointing
            else FlaxBartEncoderLayer
        )
        for i in range(n_layers):
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            # final layernorm on the output of the last layer
            # or every 6 layers for Swin v2
            add_norm = (
                self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0)
            ) or (self.config.use_final_ln_encoder and (i == n_layers - 1))
            # we don't need to scale the norm for the last layer
            use_scale = i != n_layers - 1
            layer_outputs = layer(
                self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale
            )(
                hidden_states,
                attention_mask,
                output_attentions,
                deterministic,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # add hidden states from the last layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        outputs = [
            hidden_states,
            all_hidden_states,
            all_self_attns,
        ]

        if not return_dict:
            return tuple(v for v in outputs if v is not None)

        return FlaxBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
        )
@nn.compact + def __call__( + self, + hidden_states, + attention_mask, + deterministic: bool = True, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + + n_layers = self.config.encoder_layers + layer = ( + remat(FlaxBartEncoderLayer, static_argnums=(2, 3)) + if self.config.gradient_checkpointing + else FlaxBartEncoderLayer + ) + for i in range(n_layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + # final layernorm on the output of the last layer + # or every 6 layers for Swin v2 + add_norm = ( + self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0) + ) or (self.config.use_final_ln_encoder and (i == n_layers - 1)) + # we don't need to scale the norm for the last layer + use_scale = i != n_layers - 1 + layer_outputs = layer( + self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale + )( + hidden_states, + attention_mask, + output_attentions, + deterministic, + ) + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attns += (layer_outputs[1],) + + # add hidden states from the last layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = [ + hidden_states, + all_hidden_states, + all_self_attns, + ] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutput( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + ) + + +class FlaxBartDecoderLayerCollection(nn.Module): + config: DalleBartConfig + dtype: jnp.dtype = jnp.float32 # the dtype of the computation + """ + Edits: + - use custom FlaxBartDecoderLayer + - allow Gradient Checkpointing (nn.remat) + """ + + @nn.compact + def __call__( + self, + hidden_states, + attention_mask, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: 
Optional[jnp.ndarray] = None, + deterministic: bool = True, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + ): + # decoder layers + all_hidden_states = () if output_hidden_states else None + all_self_attns = () if output_attentions else None + all_cross_attentions = ( + () if (output_attentions and encoder_hidden_states is not None) else None + ) + + n_layers = self.config.decoder_layers + layer = ( + remat(FlaxBartDecoderLayer, static_argnums=(4, 5, 6)) + if self.config.gradient_checkpointing + else FlaxBartDecoderLayer + ) + for i in range(n_layers): + if output_hidden_states: + all_hidden_states += (hidden_states,) + # final layernorm on the output of the last layer + # or every 6 layers for Swin v2 + add_norm = ( + self.config.ln_positions == "swinv2" and ((i + 1) % 6 == 0) + ) or (self.config.use_final_ln_decoder and (i == n_layers - 1)) + # we don't need to scale the norm for the last layer + use_scale = i != n_layers - 1 + layer_outputs = layer( + self.config, dtype=self.dtype, add_norm=add_norm, use_scale=use_scale + )( + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + init_cache, + output_attentions, + deterministic, + ) + + hidden_states = layer_outputs[0] + if output_attentions: + all_self_attns += (layer_outputs[1],) + + if encoder_hidden_states is not None: + all_cross_attentions += (layer_outputs[2],) + + # add hidden states from the last decoder layer + if output_hidden_states: + all_hidden_states += (hidden_states,) + + outputs = [ + hidden_states, + all_hidden_states, + all_self_attns, + all_cross_attentions, + ] + + if not return_dict: + return tuple(v for v in outputs if v is not None) + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + hidden_states=all_hidden_states, + attentions=all_self_attns, + cross_attentions=all_cross_attentions, + ) + + +class FlaxBartEncoder(nn.Module): + 
class FlaxBartEncoder(nn.Module):
    config: DalleBartConfig
    embed_tokens: nn.Embed
    dtype: jnp.dtype = jnp.float32  # the dtype of the computation
    """
    Edits:
    - offset set to 0 (no padding token)
    - use max_text_length instead of max_position_embeddings
    - use custom FlaxBartEncoderLayerCollection
    - embed_tokens cannot be None (issue at compile time)
    """

    def setup(self):
        self.dropout_layer = nn.Dropout(rate=self.config.dropout)

        embed_dim = self.config.d_model
        self.padding_idx = self.config.pad_token_id
        # Optionally scale token embeddings by sqrt(d_model) (original
        # Transformer convention), controlled by the config.
        self.embed_scale = math.sqrt(embed_dim) if self.config.scale_embedding else 1.0

        # Bart is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack
        self.offset = 0
        if self.config.use_absolute_position_embeddings:
            self.embed_positions = nn.Embed(
                # one position per text token (offset is 0: the BART
                # padding-token offset hack is disabled here)
                self.config.max_text_length + self.offset,
                embed_dim,
                embedding_init=jax.nn.initializers.normal(self.config.init_std),
            )
        self.layers = FlaxBartEncoderLayerCollection(self.config, self.dtype)
        self.layernorm_embedding = norm(
            self.config.ln_type, dtype=self.dtype, epsilon=1e-05
        )

    def __call__(
        self,
        input_ids,
        attention_mask,
        position_ids,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        deterministic: bool = True,
    ):
        """Embed text tokens (plus optional absolute positions) and run the
        encoder layer stack. Returns a FlaxBaseModelOutput or, when
        ``return_dict`` is False, whatever tuple the layer collection built.
        """
        # Flatten any leading batch dims down to (batch, seq_len).
        input_shape = input_ids.shape
        input_ids = input_ids.reshape(-1, input_shape[-1])

        hidden_states = self.embed_tokens(input_ids) * self.embed_scale

        if self.config.use_absolute_position_embeddings:
            embed_pos = self.embed_positions(position_ids + self.offset)
            hidden_states = hidden_states + embed_pos

        hidden_states = self.layernorm_embedding(hidden_states)
        hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic)

        outputs = self.layers(
            hidden_states,
            attention_mask,
            deterministic=deterministic,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return outputs

        return FlaxBaseModelOutput(
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
Other models don't have this hack + self.offset = 0 + if self.config.use_absolute_position_embeddings: + self.embed_positions = nn.Embed( + self.config.image_length + self.offset, # image length for BOS + embed_dim, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + ) + + self.layers = FlaxBartDecoderLayerCollection(self.config, self.dtype) + self.layernorm_embedding = norm( + self.config.ln_type, dtype=self.dtype, epsilon=1e-05 + ) + + def __call__( + self, + input_ids, + attention_mask, + position_ids, + encoder_hidden_states: Optional[jnp.ndarray] = None, + encoder_attention_mask: Optional[jnp.ndarray] = None, + init_cache: bool = False, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + input_shape = input_ids.shape + input_ids = input_ids.reshape(-1, input_shape[-1]) + + hidden_states = self.embed_tokens(input_ids) * self.embed_scale + + if self.config.use_absolute_position_embeddings: + embed_pos = self.embed_positions(position_ids + self.offset) + hidden_states = hidden_states + embed_pos + + hidden_states = self.layernorm_embedding(hidden_states) + hidden_states = self.dropout_layer(hidden_states, deterministic=deterministic) + + outputs = self.layers( + hidden_states, + attention_mask, + encoder_hidden_states, + encoder_attention_mask, + deterministic=deterministic, + init_cache=init_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + ) + + if not return_dict: + return outputs + + return FlaxBaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=outputs.last_hidden_state, + hidden_states=outputs.hidden_states, + attentions=outputs.attentions, + cross_attentions=outputs.cross_attentions, + ) + + +class FlaxBartModule(FlaxBartModule): + """ + Edits + - use custom FlaxBartEncoder & FlaxBartDecoder + - use separate embeddings for Encoder & Decoder + """ + + def setup(self): + 
encoder_embed_tokens = nn.Embed( + self.config.encoder_vocab_size, + self.config.d_model, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + ) + decoder_embed_tokens = nn.Embed( + self.config.image_vocab_size + 1, # image vocab size + 1 for BOS + self.config.d_model, + embedding_init=jax.nn.initializers.normal(self.config.init_std), + ) + + self.encoder = FlaxBartEncoder( + self.config, dtype=self.dtype, embed_tokens=encoder_embed_tokens + ) + self.decoder = FlaxBartDecoder( + self.config, dtype=self.dtype, embed_tokens=decoder_embed_tokens + ) + + +class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel): + """ + Edits: + - added num_params property + - config_class replaced to DalleBartConfig + - __init__ accepts abstract_init which does uses parameter shape to initialize the model + - init weights on CPU with `load_on_cpu` + - restore weights on CPU with custom `from_pretrained` + """ + + config_class = DalleBartConfig + + def __init__( + self, + config: DalleBartConfig, + input_shape: Tuple[int] = (1, 1), + seed: int = 0, + dtype: jnp.dtype = jnp.float32, + abstract_init: bool = False, + load_on_cpu: bool = False, + init_weights: bool = True, + **kwargs, + ): + module = self.module_class(config=config, dtype=dtype, **kwargs) + + # adapted from HuggingFace FlaxPreTrainedModel + if config is None: + raise ValueError("config cannot be None") + + if module is None: + raise ValueError("module cannot be None") + + # Those are private to be exposed as typed property on derived classes. + self._config = config + self._module = module + + # Those are public as their type is generic to every derived classes. 
class FlaxBartPreTrainedModel(FlaxBartPreTrainedModel):
    """
    Edits:
    - added num_params property
    - config_class replaced to DalleBartConfig
    - __init__ accepts abstract_init, which initializes only parameter
      shapes/dtypes (via jax.eval_shape) instead of real weights
    - init weights on CPU with `load_on_cpu`
    - restore weights on CPU with custom `from_pretrained`
    """

    config_class = DalleBartConfig

    def __init__(
        self,
        config: DalleBartConfig,
        input_shape: Tuple[int] = (1, 1),
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        abstract_init: bool = False,
        load_on_cpu: bool = False,
        init_weights: bool = True,
        **kwargs,
    ):
        module = self.module_class(config=config, dtype=dtype, **kwargs)

        # adapted from HuggingFace FlaxPreTrainedModel
        if config is None:
            raise ValueError("config cannot be None")

        if module is None:
            raise ValueError("module cannot be None")

        # Those are private to be exposed as typed property on derived classes.
        self._config = config
        self._module = module

        # Those are public as their type is generic to every derived classes.
        self.key = PRNGKey(seed)
        self.dtype = dtype

        if init_weights:
            # get shape of params only (or real weights, depending on
            # abstract_init / load_on_cpu)
            random_params = self.init_weights(
                self.key,
                input_shape,
                abstract_init=abstract_init,
                load_on_cpu=load_on_cpu,
            )

            # save required_params as set
            self._required_params = set(flatten_dict(unfreeze(random_params)).keys())
            self.params = random_params

    def init_weights(
        self, rng=None, input_shape=(1, 1), abstract_init=False, load_on_cpu=False
    ):
        """Initialize model parameters.

        load_on_cpu: jit the init on the CPU backend so weights are not
            allocated on an accelerator.
        abstract_init: run the init under jax.eval_shape so only parameter
            shapes and dtypes are produced (no real allocation) — actual
            weights are expected to be loaded separately.
        """
        if rng is None:
            rng = self.key
        init_fn = super().init_weights
        if load_on_cpu:
            # input_shape (argnum 1) must be static for jit
            init_fn = jax.jit(init_fn, static_argnums=(1,), backend="cpu")
        if abstract_init:
            # only set shape and dtype, load parameters separately
            init_fn = partial(init_fn, input_shape=input_shape)
            params = jax.eval_shape(init_fn, rng)
        else:
            params = init_fn(rng, input_shape)
        return params

    @property
    def num_params(self):
        """Total number of model parameters (sum of leaf array sizes)."""
        num_params = jax.tree_map(
            lambda param: param.size, flatten_dict(unfreeze(self.params))
        ).values()
        return sum(list(num_params))

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Union[str, os.PathLike],
        dtype: jnp.dtype = jnp.float32,
        *model_args,
        **kwargs,
    ):
        """Load a checkpoint, keeping parameter restoration on CPU.

        Mirrors the upstream FlaxPreTrainedModel.from_pretrained flow:
        resolve the archive (local dir/file or hub URL), instantiate a model
        (possibly abstractly, see __init__), deserialize the msgpack state,
        reconcile missing/unexpected/mismatched keys, then assign the params.
        """
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_pt = kwargs.pop("from_pt", False)
        ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        user_agent = {
            "file_type": "model",
            "framework": "flax",
            "from_auto_class": from_auto_class,
        }
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = (
                config if config is not None else pretrained_model_name_or_path
            )
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Add the dtype to model_kwargs
        model_kwargs["dtype"] = dtype

        # Load model
        if pretrained_model_name_or_path is not None:
            if os.path.isdir(pretrained_model_name_or_path):
                if from_pt and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                ):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, WEIGHTS_NAME
                    )
                elif os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                ):
                    # Load from a Flax checkpoint
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, FLAX_WEIGHTS_NAME
                    )
                else:
                    raise EnvironmentError(
                        f"Error no file named {[FLAX_WEIGHTS_NAME, WEIGHTS_NAME]} found in directory "
                        f"{pretrained_model_name_or_path} or `from_pt` set to False"
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(
                pretrained_model_name_or_path
            ):
                archive_file = pretrained_model_name_or_path
            else:
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=WEIGHTS_NAME if from_pt else FLAX_WEIGHTS_NAME,
                    revision=revision,
                )

            # redirect to the cache, if necessary
            try:
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n"
                    f"  (make sure '{pretrained_model_name_or_path}' is not a path to a local directory with something else, in that case)\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named {WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(
                    f"loading weights file {archive_file} from cache at {resolved_archive_file}"
                )
        else:
            # NOTE(review): when pretrained_model_name_or_path is None,
            # resolved_archive_file stays None and the open() below raises
            # TypeError — confirm whether a clearer error is wanted here.
            resolved_archive_file = None

        # init random models
        model = cls(config, *model_args, **model_kwargs)

        with open(resolved_archive_file, "rb") as state_f:
            try:
                state = from_bytes(cls, state_f.read())
            except (UnpicklingError, msgpack.exceptions.ExtraData) as e:
                try:
                    # Re-read as text to detect a git-lfs pointer file.
                    with open(resolved_archive_file) as f:
                        if f.read().startswith("version"):
                            raise OSError(
                                "You seem to have cloned a repository without having git-lfs installed. Please install "
                                "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                                "you cloned."
                            )
                        else:
                            raise ValueError from e
                except (UnicodeDecodeError, ValueError):
                    raise EnvironmentError(
                        f"Unable to convert {archive_file} to Flax deserializable object. "
                    )

        # if model is base model only use model_prefix key
        if (
            cls.base_model_prefix not in dict(model.params)
            and cls.base_model_prefix in state
        ):
            state = state[cls.base_model_prefix]

        # if model is head model and we are loading weights from base model
        # we initialize new params dict with base_model_prefix
        if (
            cls.base_model_prefix in dict(model.params)
            and cls.base_model_prefix not in state
        ):
            state = {cls.base_model_prefix: state}

        # flatten dicts
        state = flatten_dict(state)

        random_state = flatten_dict(unfreeze(model.params))

        missing_keys = model.required_params - set(state.keys())
        unexpected_keys = set(state.keys()) - model.required_params

        # Mismatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
        # matching the weights in the model.
        mismatched_keys = []
        for key in state.keys():
            if key in random_state and state[key].shape != random_state[key].shape:
                if ignore_mismatched_sizes:
                    mismatched_keys.append(
                        (key, state[key].shape, random_state[key].shape)
                    )
                    # keep the randomly initialized weight for this key
                    state[key] = random_state[key]
                else:
                    raise ValueError(
                        f"Trying to load the pretrained weight for {key} failed: checkpoint has shape "
                        f"{state[key].shape} which is incompatible with the model shape {random_state[key].shape}. "
                        "Using `ignore_mismatched_sizes=True` if you really want to load this checkpoint inside this "
                        "model."
                    )

        # add missing keys as random parameters
        for missing_key in missing_keys:
            state[missing_key] = random_state[missing_key]

        # remove unexpected keys to not be saved again
        for unexpected_key in unexpected_keys:
            del state[unexpected_key]

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.info(
                f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n"
            )

        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )

        # set correct parameters
        model.params = unflatten_dict(state)

        return model
+ ) + if len(mismatched_keys) > 0: + mismatched_warning = "\n".join( + [ + f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated" + for key, shape1, shape2 in mismatched_keys + ] + ) + logger.warning( + f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} " + f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n" + f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference." + ) + + # set correct parameters + model.params = unflatten_dict(state) + + return model + + +class FlaxBartForConditionalGenerationModule(FlaxBartForConditionalGenerationModule): + """ + Edits: + - no bias + - lm_head set to image_vocab_size + 1 (for BOS) + - uses custom FlaxBartModule + """ + + def setup(self): + self.model = FlaxBartModule(config=self.config, dtype=self.dtype) + self.lm_head = nn.Dense( + self.config.image_vocab_size + + 1, # image vocab size + 1 for BOS to have same size as decoder inputs (for sharding) + use_bias=False, + dtype=self.dtype, + kernel_init=jax.nn.initializers.normal(self.config.init_std), + ) + + def __call__( + self, + input_ids, + attention_mask, + decoder_input_ids, + decoder_attention_mask, + position_ids, + decoder_position_ids, + output_attentions: bool = False, + output_hidden_states: bool = False, + return_dict: bool = True, + deterministic: bool = True, + ): + outputs = self.model( + input_ids=input_ids, + attention_mask=attention_mask, + decoder_input_ids=decoder_input_ids, + decoder_attention_mask=decoder_attention_mask, + position_ids=position_ids, + decoder_position_ids=decoder_position_ids, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + deterministic=deterministic, + ) + + hidden_states = outputs[0] + + if self.config.tie_word_embeddings: + shared_embedding = 
self.model.variables["params"]["shared"]["embedding"] + lm_logits = self.lm_head.apply( + {"params": {"kernel": shared_embedding.T}}, hidden_states + ) + else: + lm_logits = self.lm_head(hidden_states) + + if not return_dict: + output = (lm_logits,) + outputs[1:] + return output + + return FlaxSeq2SeqLMOutput( + logits=lm_logits, + decoder_hidden_states=outputs.decoder_hidden_states, + decoder_attentions=outputs.decoder_attentions, + cross_attentions=outputs.cross_attentions, + encoder_last_hidden_state=outputs.encoder_last_hidden_state, + encoder_hidden_states=outputs.encoder_hidden_states, + encoder_attentions=outputs.encoder_attentions, + ) + + +@flax.struct.dataclass +class SampleState: + cur_len: jnp.ndarray + sequences: jnp.ndarray + running_token: jnp.ndarray + is_sent_finished: jnp.ndarray + prng_key: jnp.ndarray + model_kwargs: Dict[str, jnp.ndarray] + model_kwargs_uncond: Dict[str, jnp.ndarray] + + +class DalleBart( + PretrainedFromWandbMixin, FlaxBartPreTrainedModel, FlaxBartForConditionalGeneration +): + """ + Edits: + - renamed from FlaxBartForConditionalGeneration + - uses custom FlaxBartPreTrainedModel + - uses custom FlaxBartForConditionalGenerationModule + - no bias in decode method + - custom prepare_inputs_for_generation using "max_length - 1" to avoid issues + related to position embedding during model.generate() + - custom generate method to allow super conditions + """ + + module_class = FlaxBartForConditionalGenerationModule + + def decode( + self, + decoder_input_ids, + encoder_outputs, + encoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_attention_mask: Optional[jnp.ndarray] = None, + decoder_position_ids: Optional[jnp.ndarray] = None, + past_key_values: dict = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + train: bool = False, + params: dict = None, + dropout_rng: PRNGKey = None, + ): + output_attentions = ( + output_attentions + if 
    def decode(
        self,
        decoder_input_ids,
        encoder_outputs,
        encoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_attention_mask: Optional[jnp.ndarray] = None,
        decoder_position_ids: Optional[jnp.ndarray] = None,
        past_key_values: dict = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        train: bool = False,
        params: dict = None,
        dropout_rng: PRNGKey = None,
    ):
        """Run only the decoder + LM head over precomputed encoder outputs.

        Edit vs upstream: the tied-embedding path applies the shared embedding
        as the head kernel with no bias. When ``past_key_values`` is given, the
        attention cache is threaded through as a mutable collection and the
        updated cache is returned alongside the logits.
        """
        # Fall back to config defaults when flags are not given explicitly.
        output_attentions = (
            output_attentions
            if output_attentions is not None
            else self.config.output_attentions
        )
        output_hidden_states = (
            output_hidden_states
            if output_hidden_states is not None
            else self.config.output_hidden_states
        )
        return_dict = (
            return_dict if return_dict is not None else self.config.return_dict
        )

        encoder_hidden_states = encoder_outputs[0]
        if encoder_attention_mask is None:
            batch_size, sequence_length = encoder_hidden_states.shape[:2]
            encoder_attention_mask = jnp.ones((batch_size, sequence_length))

        batch_size, sequence_length = decoder_input_ids.shape
        if decoder_attention_mask is None:
            decoder_attention_mask = jnp.ones((batch_size, sequence_length))

        if decoder_position_ids is None:
            if past_key_values is not None:
                # During cached decoding the caller must track positions itself.
                raise ValueError(
                    "Make sure to provide `decoder_position_ids` when passing `past_key_values`."
                )

            decoder_position_ids = jnp.broadcast_to(
                jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
            )

        # Handle any PRNG if needed
        rngs = {}
        if dropout_rng is not None:
            rngs["dropout"] = dropout_rng

        inputs = {"params": params or self.params}

        # if past_key_values are passed then cache is already initialized a private flag init_cache has to be
        # passed down to ensure cache is used. It has to be made sure that cache is marked as mutable so that
        # it can be changed by FlaxBartAttention module
        if past_key_values:
            inputs["cache"] = past_key_values
            mutable = ["cache"]
        else:
            mutable = False

        def _decoder_forward(
            module,
            decoder_input_ids,
            decoder_attention_mask,
            decoder_position_ids,
            **kwargs,
        ):
            # Runs the decoder submodule and the LM head only (skips encoder).
            decoder_module = module._get_decoder_module()
            outputs = decoder_module(
                decoder_input_ids,
                decoder_attention_mask,
                decoder_position_ids,
                **kwargs,
            )
            hidden_states = outputs[0]

            if self.config.tie_word_embeddings:
                # Tied weights: shared embedding (transposed) as head kernel.
                shared_embedding = module.model.variables["params"]["shared"][
                    "embedding"
                ]
                lm_logits = module.lm_head.apply(
                    {"params": {"kernel": shared_embedding.T}}, hidden_states
                )
            else:
                lm_logits = module.lm_head(hidden_states)

            return lm_logits, outputs

        outputs = self.module.apply(
            inputs,
            decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
            decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
            decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=jnp.array(encoder_attention_mask, dtype="i4"),
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            deterministic=not train,
            rngs=rngs,
            mutable=mutable,
            method=_decoder_forward,
        )

        # With a mutable cache, apply() also returns the updated collections.
        if past_key_values is None:
            lm_logits, decoder_outputs = outputs
        else:
            (lm_logits, decoder_outputs), past = outputs

        if return_dict:
            outputs = FlaxCausalLMOutputWithCrossAttentions(
                logits=lm_logits,
                hidden_states=decoder_outputs.hidden_states,
                attentions=decoder_outputs.attentions,
                cross_attentions=decoder_outputs.cross_attentions,
            )
        else:
            outputs = (lm_logits,) + decoder_outputs[1:]

        # add updated cache to model output
        if past_key_values is not None and return_dict:
            outputs["past_key_values"] = unfreeze(past["cache"])
            return outputs
        elif past_key_values is not None and not return_dict:
            outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]

        return outputs
past_key_values is not None and not return_dict: + outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:] + + return outputs + + def prepare_inputs_for_generation( + self, + decoder_input_ids, + max_length, + attention_mask: Optional[jnp.DeviceArray] = None, + decoder_attention_mask: Optional[jnp.DeviceArray] = None, + encoder_outputs=None, + **kwargs, + ): + # initializing the cache + batch_size, seq_length = decoder_input_ids.shape + + past_key_values = self.init_cache(batch_size, max_length - 1, encoder_outputs) + # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. + # But since the decoder uses a causal mask, those positions are masked anyways. + # Thus we can create a single static attention_mask here, which is more efficient for compilation + extended_attention_mask = jnp.ones((batch_size, max_length - 1), dtype="i4") + if decoder_attention_mask is not None: + position_ids = decoder_attention_mask.cumsum(axis=-1) - 1 + extended_attention_mask = lax.dynamic_update_slice( + extended_attention_mask, decoder_attention_mask, (0, 0) + ) + else: + position_ids = jnp.broadcast_to( + jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length) + ) + + return { + "past_key_values": past_key_values, + "encoder_outputs": encoder_outputs, + "encoder_attention_mask": attention_mask, + "decoder_attention_mask": extended_attention_mask, + "decoder_position_ids": position_ids, + } + + def generate( + self, + input_ids: jnp.ndarray, + attention_mask: Optional[jnp.ndarray] = None, + max_length: Optional[int] = None, + pad_token_id: Optional[int] = None, + bos_token_id: Optional[int] = None, + eos_token_id: Optional[int] = None, + decoder_start_token_id: Optional[int] = None, + do_sample: Optional[bool] = None, + prng_key: Optional[jnp.ndarray] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + temperature: Optional[float] = None, + num_beams: Optional[int] = None, + 
no_repeat_ngram_size: Optional[int] = None, + min_length: Optional[int] = None, + forced_bos_token_id: Optional[int] = None, + forced_eos_token_id: Optional[int] = None, + length_penalty: Optional[float] = None, + early_stopping: Optional[bool] = None, + trace: bool = True, + params: Optional[Dict[str, jnp.ndarray]] = None, + condition_scale: Optional[float] = 1.0, + input_ids_uncond: Optional[jnp.ndarray] = None, + attention_mask_uncond: Optional[jnp.ndarray] = None, + **model_kwargs, + ): + """Edit: Allow super conditioning.""" + + # set init values + max_length = max_length if max_length is not None else self.config.max_length + bos_token_id = ( + bos_token_id if bos_token_id is not None else self.config.bos_token_id + ) + pad_token_id = ( + pad_token_id if pad_token_id is not None else self.config.pad_token_id + ) + eos_token_id = ( + eos_token_id if eos_token_id is not None else self.config.eos_token_id + ) + decoder_start_token_id = ( + decoder_start_token_id + if decoder_start_token_id + else self.config.decoder_start_token_id + ) + prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0) + + if decoder_start_token_id is None and self.config.is_encoder_decoder: + raise ValueError( + "`decoder_start_token_id` has to be defined for encoder-decoder generation." + ) + + do_sample = do_sample if do_sample is not None else self.config.do_sample + num_beams = num_beams if num_beams is not None else self.config.num_beams + + if self.config.is_encoder_decoder: + # add encoder_outputs to model_kwargs + if model_kwargs.get("encoder_outputs") is None: + model_kwargs_input = dict(model_kwargs) + model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation( + input_ids, + params, + {"attention_mask": attention_mask, **model_kwargs_input}, + ) + if condition_scale != 1.0: + assert ( + input_ids_uncond is not None + ), "`input_ids_uncond` has to be defined for super conditioning." 
+ assert ( + do_sample is True + ), "`do_sample` has to be True for super conditioning." + assert ( + num_beams == 1 + ), "`num_beams` has to be 1 for super conditioning." + model_kwargs_uncond = ( + self._prepare_encoder_decoder_kwargs_for_generation( + input_ids_uncond, + params, + { + "attention_mask": attention_mask_uncond, + **model_kwargs_input, + }, + ) + ) + else: + model_kwargs_uncond = None + # prepare decoder_input_ids for generation + input_ids = ( + jnp.ones((input_ids.shape[0], 1), dtype="i4") * decoder_start_token_id + ) + + if not do_sample and num_beams == 1: + logits_processor = self._get_logits_processor( + no_repeat_ngram_size, + min_length, + max_length, + eos_token_id, + forced_bos_token_id, + forced_eos_token_id, + ) + return self._greedy_search( + input_ids, + max_length, + pad_token_id, + eos_token_id, + logits_processor=logits_processor, + trace=trace, + params=params, + model_kwargs=model_kwargs, + ) + elif do_sample and num_beams == 1: + logits_warper = self._get_logits_warper( + top_k=top_k, top_p=top_p, temperature=temperature + ) + logits_processor = self._get_logits_processor( + no_repeat_ngram_size, + min_length, + max_length, + eos_token_id, + forced_bos_token_id, + forced_eos_token_id, + ) + return self._sample( + input_ids, + max_length, + pad_token_id, + eos_token_id, + prng_key, + logits_warper=logits_warper, + logits_processor=logits_processor, + trace=trace, + params=params, + model_kwargs=model_kwargs, + condition_scale=condition_scale, + model_kwargs_uncond=model_kwargs_uncond, + ) + elif not do_sample and num_beams > 1: + # broadcast input_ids & encoder_outputs + input_ids = self._expand_to_num_beams(input_ids, num_beams=num_beams) + + if "encoder_outputs" in model_kwargs: + model_kwargs["encoder_outputs"][ + "last_hidden_state" + ] = self._expand_to_num_beams( + model_kwargs["encoder_outputs"]["last_hidden_state"], + num_beams=num_beams, + ) + + if "attention_mask" in model_kwargs: + model_kwargs["attention_mask"] = 
    def _sample(
        self,
        input_ids: jnp.ndarray = None,
        max_length: Optional[int] = None,
        pad_token_id: Optional[int] = None,
        eos_token_id: Optional[int] = None,
        prng_key: Optional[jnp.ndarray] = None,
        logits_processor=None,
        logits_warper=None,
        trace: bool = True,
        params: Optional[Dict[str, jnp.ndarray]] = None,
        model_kwargs: Optional[Dict[str, jnp.ndarray]] = None,
        condition_scale: float = 1.0,
        model_kwargs_uncond: Optional[Dict[str, jnp.ndarray]] = None,
    ):
        """Autoregressive sampling loop with optional super conditioning.

        Extends the standard Flax sampling loop: when ``condition_scale != 1.0``
        a second (unconditioned) forward pass is run each step and the logits
        are extrapolated away from the unconditioned distribution.

        Args:
            input_ids: decoder start tokens, shape (batch, cur_len).
            max_length / pad_token_id / eos_token_id: fall back to `self.config`.
            prng_key: sampling key; defaults to PRNGKey(0) for determinism.
            logits_processor / logits_warper: HF-style callables applied to the
                last-step logits before sampling.
            trace: if False, run the loop eagerly for debugging instead of
                under `lax.while_loop`.
            params: model parameters passed through to each forward call.
            model_kwargs / model_kwargs_uncond: conditioned / unconditioned
                generation kwargs (e.g. encoder outputs, cache).
            condition_scale: super conditioning strength; 1.0 disables it.

        Returns:
            FlaxSampleOutput with `sequences` of shape (batch, max_length),
            padded with `pad_token_id` after EOS.
        """
        # init values
        max_length = max_length if max_length is not None else self.config.max_length
        pad_token_id = (
            pad_token_id if pad_token_id is not None else self.config.pad_token_id
        )
        eos_token_id = (
            eos_token_id if eos_token_id is not None else self.config.eos_token_id
        )
        prng_key = prng_key if prng_key is not None else jax.random.PRNGKey(0)

        batch_size, cur_len = input_ids.shape

        # lift scalars to jnp arrays so they are valid loop-carried values
        eos_token_id = jnp.array(eos_token_id)
        pad_token_id = jnp.array(pad_token_id)
        cur_len = jnp.array(cur_len)

        # per batch-item holding current token in loop.
        sequences = jnp.full((batch_size, max_length), pad_token_id, dtype=jnp.int32)
        sequences = lax.dynamic_update_slice(sequences, input_ids, (0, 0))

        # per batch-item state bit indicating if sentence has finished.
        is_sent_finished = jnp.zeros((batch_size,), dtype=jnp.bool_)

        # For Seq2Seq generation, we only need to use the decoder instead of the whole model in generation loop
        # and pass it the `encoder_outputs`, which are part of the `model_kwargs`.
        model = self.decode if self.config.is_encoder_decoder else self

        # initialize model specific kwargs
        model_kwargs = self.prepare_inputs_for_generation(
            input_ids, max_length, **model_kwargs
        )
        if condition_scale != 1.0:
            # mirror the conditioned setup (cache, position ids) for the
            # unconditioned branch used by super conditioning
            model_kwargs_uncond = self.prepare_inputs_for_generation(
                input_ids, max_length, **model_kwargs_uncond
            )

        # initialize state
        state = SampleState(
            cur_len=cur_len,
            sequences=sequences,
            running_token=input_ids,
            is_sent_finished=is_sent_finished,
            prng_key=prng_key,
            model_kwargs=model_kwargs,
            model_kwargs_uncond=model_kwargs_uncond,
        )

        def sample_search_cond_fn(state):
            """state termination condition fn."""
            has_reached_max_length = state.cur_len == max_length
            all_sequence_finished = jnp.all(state.is_sent_finished)
            finish_generation = jnp.logical_or(
                has_reached_max_length, all_sequence_finished
            )
            return ~finish_generation

        def sample_search_body_fn(state):
            """state update fn."""
            prng_key, prng_key_next = jax.random.split(state.prng_key)
            model_outputs = model(
                state.running_token, params=params, **state.model_kwargs
            )

            logits = model_outputs.logits[:, -1]

            # perform super conditioning
            # Source: @RiversHaveWings - https://twitter.com/RiversHaveWings/status/1478093658716966912?s=20&t=xdm-wZ61Wf7OLnE_NJHZ1w
            if condition_scale != 1.0:
                model_outputs_uncond = model(
                    state.running_token, params=params, **state.model_kwargs_uncond
                )
                logits_uncond = model_outputs_uncond.logits[:, -1]
                # extrapolate away from the unconditioned distribution
                logits = logits_uncond + condition_scale * (logits - logits_uncond)
            else:
                model_outputs_uncond = None

            # apply min_length, ...
            logits = logits_processor(state.sequences, logits, state.cur_len)
            # apply top_k, top_p, temperature
            # NOTE(review): `logits` is passed as both the `input_ids` and
            # `scores` arguments; HF Flax warpers ignore the first — confirm.
            logits = logits_warper(logits, logits, state.cur_len)

            next_token = jax.random.categorical(prng_key, logits, axis=-1)

            # once a sequence is finished, keep emitting pad tokens
            next_is_sent_finished = state.is_sent_finished | (
                next_token == eos_token_id
            )
            next_token = (
                next_token * ~next_is_sent_finished
                + pad_token_id * next_is_sent_finished
            )
            next_token = next_token[:, None]

            next_sequences = lax.dynamic_update_slice(
                state.sequences, next_token, (0, state.cur_len)
            )
            next_model_kwargs = self.update_inputs_for_generation(
                model_outputs, state.model_kwargs
            )
            next_model_kwargs_uncond = (
                self.update_inputs_for_generation(
                    model_outputs_uncond, state.model_kwargs_uncond
                )
                if condition_scale != 1.0
                else None
            )

            return SampleState(
                cur_len=state.cur_len + 1,
                sequences=next_sequences,
                running_token=next_token,
                is_sent_finished=next_is_sent_finished,
                model_kwargs=next_model_kwargs,
                model_kwargs_uncond=next_model_kwargs_uncond,
                prng_key=prng_key_next,
            )

        # The very first prompt often has sequence length > 1, so run outside of `lax.while_loop` to comply with TPU
        if input_ids.shape[1] > 1:
            state = sample_search_body_fn(state)

        if not trace:
            state = self._run_loop_in_debug(
                sample_search_cond_fn, sample_search_body_fn, state
            )
        else:
            state = lax.while_loop(sample_search_cond_fn, sample_search_body_fn, state)

        return FlaxSampleOutput(sequences=state.sequences)
https://github.com/google-research/google-research/blob/master/flax_models/t5x/partitions.py +# Sentinels +_unmatched = object() + +# For specifying empty leaf dict `{}` +empty_dict = object() + + +def _match(qs, ks): + """Return True if regexes in qs match any window of strings in tuple ks.""" + # compile regexes and force complete match + qts = tuple(map(lambda x: re.compile(x + "$"), qs)) + for i in range(len(ks) - len(qs) + 1): + matches = [x.match(y) for x, y in zip(qts, ks[i:])] + if matches and all(matches): + return True + return False + + +def _replacement_rules(rules): + def replace(key, val): + for rule, replacement in rules: + if _match(rule, key): + return replacement + return val + + return replace + + +def _get_partition_rules(): + return [ + # embeddings + (("embed_positions", "embedding"), P("mp", None)), + (("embed_tokens", "embedding"), P("mp", None)), + (("rel_bias", "embedding"), P(None, "mp")), + # attention + (("(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")), + (("out_proj", "kernel"), P("mp", None)), + # FFN + (("Dense_0", "kernel"), P(None, "mp")), + (("GLU.*", "Dense_1", "kernel"), P(None, "mp")), + (("GLU.*", "Dense_2", "kernel"), P("mp", None)), + (("FFN.*", "Dense_1", "kernel"), P("mp", None)), + # layer norms + (("(bias|scale)",), None), + (("lm_head", "kernel"), P(None, "mp")), + # head scale and tau + (("(head_scale|tau)",), None), + ] + + +def set_partitions(in_dict): + rules = _get_partition_rules() + replace = _replacement_rules(rules) + initd = {k: _unmatched for k in flatten_dict(in_dict)} + result = {k: replace(k, v) for k, v in initd.items()} + for k, v in result.items(): + if v == _unmatched: + print(f"Unmatched -> {k}") + assert _unmatched not in result.values(), "Incomplete partition spec." 
class DalleBartProcessorBase:
    """Prepares text prompts for DalleBart generation.

    Wraps a `DalleBartTokenizer` and (optionally) a `TextNormalizer`, and
    pre-computes the tokenized empty prompt used as the unconditioned input
    when generating with super conditioning.
    """

    def __init__(
        self, tokenizer: DalleBartTokenizer, normalize_text: bool, max_text_length: int
    ):
        # tokenizer: produces fixed-length jax arrays for each prompt
        # normalize_text: whether prompts are cleaned with TextNormalizer first
        # max_text_length: padding/truncation length applied to every prompt
        self.tokenizer = tokenizer
        self.normalize_text = normalize_text
        self.max_text_length = max_text_length
        if normalize_text:
            self.text_processor = TextNormalizer()
        # create unconditional tokens: the tokenized empty string, consumed by
        # `generate` when `condition_scale != 1.0` (super conditioning)
        uncond = self.tokenizer(
            "",
            return_tensors="jax",
            padding="max_length",
            truncation=True,
            max_length=self.max_text_length,
        ).data
        self.input_ids_uncond = uncond["input_ids"]
        self.attention_mask_uncond = uncond["attention_mask"]

    def __call__(self, text: str = None):
        """Tokenize a batch of prompts.

        Returns the tokenizer output dict extended with
        `input_ids_uncond` / `attention_mask_uncond` for super conditioning.

        NOTE(review): despite the `str` annotation, `text` must be a *list*
        of strings — a bare string is rejected by the assert below.
        """
        # check that text is not a string
        assert not isinstance(text, str), "text must be a list of strings"

        if self.normalize_text:
            text = [self.text_processor(t) for t in text]
        res = self.tokenizer(
            text,
            return_tensors="jax",
            padding="max_length",
            truncation=True,
            max_length=self.max_text_length,
        ).data
        # tokens used only with super conditioning: repeat the empty-prompt
        # encoding so it matches the batch dimension of `text`
        n = len(text)
        res["input_ids_uncond"] = jnp.repeat(self.input_ids_uncond, n, axis=0)
        res["attention_mask_uncond"] = jnp.repeat(self.attention_mask_uncond, n, axis=0)
        return res

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        """Build a processor from a pretrained tokenizer and config; args are
        forwarded unchanged to both underlying `from_pretrained` calls."""
        tokenizer = DalleBartTokenizer.from_pretrained(*args, **kwargs)
        config = DalleBartConfig.from_pretrained(*args, **kwargs)
        return cls(tokenizer, config.normalize_text, config.max_text_length)
class HashtagProcessor:
    # Adapted from wordninja library
    # We use our wikipedia word count + a good heuristic to make it work
    """Split concatenated hashtag text into words via dynamic programming.

    Word costs are log-ranked frequencies from a wikipedia word list pulled
    from the Hugging Face Hub: more frequent words get lower cost, and the
    split minimizing total cost is chosen.
    """

    def __init__(self):
        # download the frequency-sorted word list (one word per line)
        wiki_word_frequency = hf_hub_download(
            "dalle-mini/dalle-mini", filename="enwiki-words-frequency.txt"
        )
        self._word_cost = (
            l.split()[0]
            for l in Path(wiki_word_frequency).read_text(encoding="utf8").splitlines()
        )
        # cost = log(rank + 1): earlier (more frequent) words are cheaper
        self._word_cost = {
            str(k): math.log(float(i + 1)) for i, k in enumerate(self._word_cost)
        }
        # longest known word bounds the DP window
        self._max_word = max(len(x) for x in self._word_cost.keys())
        # split input on anything that is not alphanumeric or apostrophe
        self._SPLIT_RE = re.compile("[^a-zA-Z0-9']+")

    def __call__(self, s):
        """Uses dynamic programming to infer the location of spaces in a string without spaces."""
        l = [self._split(x) for x in self._SPLIT_RE.split(s)]
        return " ".join([item for sublist in l for item in sublist])

    def _split(self, s):
        # Find the best match for the i first characters, assuming cost has
        # been built for the i-1 first characters.
        # Returns a pair (match_cost, match_length).
        def best_match(i):
            candidates = enumerate(reversed(cost[max(0, i - self._max_word) : i]))
            # 9e999 overflows to float('inf'): unknown words are effectively
            # forbidden unless no known word fits
            return min(
                (c + self._word_cost.get(s[i - k - 1 : i].lower(), 9e999), k + 1)
                for k, c in candidates
            )

        # Build the cost array
        cost = [0]
        for i in range(1, len(s) + 1):
            c, k = best_match(i)
            cost.append(c)

        # Backtrack to recover the minimal-cost string.
        out = []
        i = len(s)
        while i > 0:
            c, k = best_match(i)
            assert c == cost[i]
            newToken = True
            if not s[i - k : i] == "'":  # ignore a lone apostrophe
                if len(out) > 0:
                    # re-attach split 's and split digits
                    if out[-1] == "'s" or (
                        s[i - 1].isdigit() and out[-1][0].isdigit()
                    ):  # digit followed by digit
                        out[-1] = (
                            s[i - k : i] + out[-1]
                        )  # combine current token with previous token
                        newToken = False

                if newToken:
                    out.append(s[i - k : i])

            i -= k

        # tokens were collected right-to-left, so reverse for reading order
        return reversed(out)
def replace_person_token(t):
    """Replace `<person>` placeholder tokens with natural wording (used for CC12M).

    BUG FIX: the `<person>` markers had been stripped from this function
    (HTML-tag-like mangling), leaving ``while "" in t:`` — which is always
    true and loops forever — and a `re.sub` pattern that matched bare
    comma/"and" separators. The markers are restored below.
    """
    # Collapse runs like "<person> and <person>, <person>" into " people ".
    t = re.sub("<person>([,\s]*(and)*[,\s]*<person>)+", " people ", t)
    # Replace remaining markers one at a time so each draw is independent;
    # phrases are weighted by wiki word occurrence (see `person_token`).
    while "<person>" in t:
        t = t.replace(
            "<person>", f" {random.choices(*tuple(zip(*person_token)))[0]} ", 1
        )
    return t
def remove_first_last_commas(t):
    "Strip surrounding whitespace and drop at most one leading/trailing comma"
    trimmed = t.strip()
    # one trailing comma, then one leading comma — never more
    if trimmed.endswith(","):
        trimmed = trimmed[:-1]
    if trimmed.startswith(","):
        trimmed = trimmed[1:]
    return trimmed.strip()
class DalleBartTokenizer(PretrainedFromWandbMixin, BartTokenizerFast):
    """BART fast tokenizer for DalleBart.

    All tokenization behavior comes from `BartTokenizerFast`; the
    `PretrainedFromWandbMixin` only extends `from_pretrained` so that wandb
    artifact references are downloaded locally before delegating to the
    superclass loader.
    """

    pass
class PretrainedFromWandbMixin:
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        """
        Initializes from a wandb artifact or delegates loading to the superclass.

        A reference containing ":" that is not an existing local directory is
        treated as a wandb artifact reference (presumably of the form
        "entity/project/name:version" — the colon separates the version tag;
        verify against callers). Anything else is passed through untouched.
        """
        with tempfile.TemporaryDirectory() as tmp_dir:  # avoid multiple artifact copies
            if ":" in pretrained_model_name_or_path and not os.path.isdir(
                pretrained_model_name_or_path
            ):
                # wandb artifact: prefer the active run's artifact API when a
                # run exists, otherwise fall back to the public wandb API
                if wandb.run is not None:
                    artifact = wandb.run.use_artifact(pretrained_model_name_or_path)
                else:
                    artifact = wandb.Api().artifact(pretrained_model_name_or_path)
                pretrained_model_name_or_path = artifact.download(tmp_dir)

            # The `return` expression is evaluated inside the `with` block, so
            # downloaded files still exist while the superclass loads them;
            # the temporary directory is cleaned up immediately afterwards.
            return super(PretrainedFromWandbMixin, cls).from_pretrained(
                pretrained_model_name_or_path, *model_args, **kwargs
            )
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3b59489e", + "metadata": {}, + "outputs": [], + "source": [ + "from tqdm.notebook import tqdm\n", + "\n", + "import torchvision.transforms as T\n", + "\n", + "import webdataset as wds\n", + "\n", + "import jax\n", + "import braceexpand\n", + "from pathlib import Path" + ] + }, + { + "cell_type": "markdown", + "id": "c7c4c1e6", + "metadata": {}, + "source": [ + "## Configuration Parameters" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "1265dbfe", + "metadata": {}, + "outputs": [], + "source": [ + "shards = \"my_images/shard-{0000..0008}.tar\" # defined using braceexpand format as used by webdataset\n", + "encoded_output = Path(\"encoded_data\") # where we will save our encoded data\n", + "\n", + "VQGAN_REPO, VQGAN_COMMIT_ID = (\n", + " \"dalle-mini/vqgan_imagenet_f16_16384\",\n", + " \"85eb5d3b51a1c62a0cc8f4ccdee9882c0d0bd384\",\n", + ")\n", + "\n", + "# good defaults for a TPU v3-8\n", + "batch_size = 128 # Per device\n", + "num_workers = 8 # For parallel processing\n", + "total_bs = batch_size * jax.device_count() # You can use a smaller size while testing\n", + "save_frequency = 128 # Number of batches to create a new file (180MB for f16 and 720MB for f8 per file)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "cd956ec6-7d98-4d4d-a454-f80fe857eadd", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['XXX/shard-0000.tar',\n", + " 'XXX/shard-0001.tar',\n", + " 'XXX/shard-0002.tar',\n", + " 'XXX/shard-0003.tar',\n", + " 'XXX/shard-0004.tar',\n", + " 'XXX/shard-0005.tar',\n", + " 'XXX/shard-0006.tar',\n", + " 'XXX/shard-0007.tar',\n", + " 'XXX/shard-0008.tar']" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "shards = list(\n", + " braceexpand.braceexpand(shards)\n", + ") # better display for tqdm with known length" + ] + }, + { + "cell_type": "markdown", + "id": 
"75dba8e2", + "metadata": {}, + "source": [ + "## Load data" + ] + }, + { + "cell_type": "markdown", + "id": "a1e8fb95", + "metadata": {}, + "source": [ + "We load data using `webdataset`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9ef5de9e", + "metadata": {}, + "outputs": [], + "source": [ + "ds = (\n", + " wds.WebDataset(shards, handler=wds.warn_and_continue)\n", + " .decode(\"rgb\", handler=wds.warn_and_continue)\n", + " .to_tuple(\"jpg\", \"txt\") # assumes image is in `jpg` and caption in `txt`\n", + " .batched(total_bs) # load in batch per worker (faster)\n", + ")" + ] + }, + { + "cell_type": "markdown", + "id": "90981824", + "metadata": {}, + "source": [ + "Note:\n", + "* you can also shuffle shards and items using `shardshuffle` and `shuffle` if necessary.\n", + "* you may need to resize images in your pipeline (with `map_dict` for example), we assume they are already set to 256x256.\n", + "* you can also filter out some items using `select`." + ] + }, + { + "cell_type": "markdown", + "id": "129c377d", + "metadata": {}, + "source": [ + "We can now inspect our data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8cac98cb", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "%%time\n", + "images, captions = next(iter(ds))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "cd268fbf", + "metadata": {}, + "outputs": [], + "source": [ + "images.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5acfc4d8", + "metadata": {}, + "outputs": [], + "source": [ + "captions[:10]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c24693c0", + "metadata": {}, + "outputs": [], + "source": [ + "T.ToPILImage()(images[0].permute(2, 0, 1))" + ] + }, + { + "cell_type": "markdown", + "id": "3059ffb1", + "metadata": {}, + "source": [ + "Finally we create our dataloader." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c227c551", + "metadata": {}, + "outputs": [], + "source": [ + "dl = (\n", + " wds.WebLoader(ds, batch_size=None, num_workers=8).unbatched().batched(total_bs)\n", + ") # avoid partial batch at the end of each worker" + ] + }, + { + "cell_type": "markdown", + "id": "a354472b", + "metadata": {}, + "source": [ + "## Image encoder\n", + "\n", + "We'll use a VQGAN trained with Taming Transformers and converted to a JAX model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47a8b818", + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "from vqgan_jax.modeling_flax_vqgan import VQModel\n", + "from flax.jax_utils import replicate\n", + "\n", + "vqgan = VQModel.from_pretrained(\"flax-community/vqgan_f16_16384\")\n", + "vqgan_params = replicate(vqgan.params)" + ] + }, + { + "cell_type": "markdown", + "id": "62ad01c3", + "metadata": {}, + "source": [ + "## Encoding" + ] + }, + { + "cell_type": "markdown", + "id": "20357f74", + "metadata": {}, + "source": [ + "Encoding is really simple using `shard` to automatically distribute batches across devices and `pmap`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "322a4619", + "metadata": {}, + "outputs": [], + "source": [ + "from flax.training.common_utils import shard\n", + "from functools import partial\n", + "\n", + "\n", + "@partial(jax.pmap, axis_name=\"batch\")\n", + "def p_encode(batch, params):\n", + " # Not sure if we should `replicate` params, does not seem to have any effect\n", + " _, indices = vqgan.encode(batch, params=params)\n", + " return indices" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ff6c10d4", + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "\n", + "def encode_dataset(dataloader, output_dir, save_frequency):\n", + " output_dir.mkdir(parents=True, exist_ok=True)\n", + " all_captions = []\n", + " all_encoding = []\n", + " n_file = 1\n", + " for idx, (images, captions) in enumerate(tqdm(dataloader)):\n", + " images = images.numpy()\n", + " n = len(images) // 8 * 8\n", + " if n != len(images):\n", + " # get the max number of images we can (multiple of 8)\n", + " print(f\"Different sizes {n} vs {len(images)}\")\n", + " images = images[:n]\n", + " captions = captions[:n]\n", + " if not len(captions):\n", + " print(f\"No images/captions in batch...\")\n", + " continue\n", + " images = shard(images)\n", + " encoded = p_encode(images, vqgan_params)\n", + " encoded = encoded.reshape(-1, encoded.shape[-1])\n", + " all_captions.extend(captions)\n", + " all_encoding.extend(encoded.tolist())\n", + "\n", + " # save files\n", + " if (idx + 1) % save_frequency == 0:\n", + " print(f\"Saving file {n_file}\")\n", + " batch_df = pd.DataFrame.from_dict(\n", + " {\"caption\": all_captions, \"encoding\": all_encoding}\n", + " )\n", + " batch_df.to_parquet(f\"{output_dir}/{n_file:03d}.parquet\")\n", + " all_captions = []\n", + " all_encoding = []\n", + " n_file += 1\n", + "\n", + " if len(all_captions):\n", + " print(f\"Saving final file {n_file}\")\n", + " batch_df = 
pd.DataFrame.from_dict(\n", + " {\"caption\": all_captions, \"encoding\": all_encoding}\n", + " )\n", + " batch_df.to_parquet(f\"{output_dir}/{n_file:03d}.parquet\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7704863d", + "metadata": {}, + "outputs": [], + "source": [ + "encode_dataset(dl, output_dir=encoded_output, save_frequency=save_frequency)" + ] + }, + { + "cell_type": "markdown", + "id": "8953dd84", + "metadata": {}, + "source": [ + "----" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "db471c52d602b4f5f40ecaf278e88ccfef85c29d0a1a07185b0d51fc7acf4e26" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/tools/inference/inference_pipeline.ipynb b/tools/inference/inference_pipeline.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..6d8ee41f817afac19f80a7d98566deeee73f7998 --- /dev/null +++ b/tools/inference/inference_pipeline.ipynb @@ -0,0 +1,479 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "colab_type": "text", + "id": "view-in-github" + }, + "source": [ + "\"Open" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "118UKH5bWCGa" + }, + "source": [ + "# DALL·E mini - Inference pipeline\n", + "\n", + "*Generate images from a text prompt*\n", + "\n", + "\n", + "\n", + "This notebook illustrates [DALL·E mini](https://github.com/borisdayma/dalle-mini) inference pipeline.\n", + "\n", + "Just want to play? 
Use [the demo](https://huggingface.co/spaces/flax-community/dalle-mini).\n", + "\n", + "For more understanding of the model, refer to [the report](https://wandb.ai/dalle-mini/dalle-mini/reports/DALL-E-mini--Vmlldzo4NjIxODA)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "dS8LbaonYm3a" + }, + "source": [ + "## 🛠️ Installation and set-up" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "uzjAM2GBYpZX" + }, + "outputs": [], + "source": [ + "# Install required libraries\n", + "!pip install -q git+https://github.com/huggingface/transformers.git\n", + "!pip install -q git+https://github.com/patil-suraj/vqgan-jax.git\n", + "!pip install -q git+https://github.com/borisdayma/dalle-mini.git" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ozHzTkyv8cqU" + }, + "source": [ + "We load required models:\n", + "* dalle·mini for text to encoded images\n", + "* VQGAN for decoding images\n", + "* CLIP for scoring predictions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "K6CxW2o42f-w" + }, + "outputs": [], + "source": [ + "# Model references\n", + "\n", + "# dalle-mini\n", + "DALLE_MODEL = \"dalle-mini/dalle-mini/model-3f0lem84:latest\" # can be wandb artifact or 🤗 Hub or local folder or google bucket\n", + "DALLE_COMMIT_ID = None\n", + "\n", + "# VQGAN model\n", + "VQGAN_REPO = \"dalle-mini/vqgan_imagenet_f16_16384\"\n", + "VQGAN_COMMIT_ID = \"e93a26e7707683d349bf5d5c41c5b0ef69b677a9\"\n", + "\n", + "# CLIP model\n", + "CLIP_REPO = \"openai/clip-vit-large-patch14\"\n", + "CLIP_COMMIT_ID = None" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Yv-aR3t4Oe5v" + }, + "outputs": [], + "source": [ + "import jax\n", + "import jax.numpy as jnp\n", + "\n", + "# check how many devices are available\n", + "jax.local_device_count()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "HWnQrQuXOe5w" + }, + 
"outputs": [], + "source": [ + "# type used for computation - use bfloat16 on TPU's\n", + "dtype = jnp.bfloat16 if jax.local_device_count() == 8 else jnp.float32\n", + "\n", + "# TODO: fix issue with bfloat16\n", + "dtype = jnp.float32" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "92zYmvsQ38vL" + }, + "outputs": [], + "source": [ + "# Load models & tokenizer\n", + "from dalle_mini import DalleBart, DalleBartProcessor\n", + "from vqgan_jax.modeling_flax_vqgan import VQModel\n", + "from transformers import CLIPProcessor, FlaxCLIPModel\n", + "\n", + "# Load dalle-mini\n", + "model = DalleBart.from_pretrained(\n", + " DALLE_MODEL, revision=DALLE_COMMIT_ID, dtype=dtype, abstract_init=True\n", + ")\n", + "\n", + "# Load VQGAN\n", + "vqgan = VQModel.from_pretrained(VQGAN_REPO, revision=VQGAN_COMMIT_ID)\n", + "\n", + "# Load CLIP\n", + "clip = FlaxCLIPModel.from_pretrained(CLIP_REPO, revision=CLIP_COMMIT_ID)\n", + "clip_processor = CLIPProcessor.from_pretrained(CLIP_REPO, revision=CLIP_COMMIT_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "o_vH2X1tDtzA" + }, + "source": [ + "Model parameters are replicated on each device for faster inference." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "wtvLoM48EeVw" + }, + "outputs": [], + "source": [ + "from flax.jax_utils import replicate\n", + "\n", + "# convert model parameters for inference if requested\n", + "if dtype == jnp.bfloat16:\n", + " model.params = model.to_bf16(model.params)\n", + "\n", + "model._params = replicate(model.params)\n", + "vqgan._params = replicate(vqgan.params)\n", + "clip._params = replicate(clip.params)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "0A9AHQIgZ_qw" + }, + "source": [ + "Model functions are compiled and parallelized to take advantage of multiple devices." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "sOtoOmYsSYPz" + }, + "outputs": [], + "source": [ + "from functools import partial\n", + "\n", + "# model inference\n", + "@partial(jax.pmap, axis_name=\"batch\", static_broadcasted_argnums=(3, 4, 5, 6))\n", + "def p_generate(\n", + " tokenized_prompt, key, params, top_k, top_p, temperature, condition_scale\n", + "):\n", + " return model.generate(\n", + " **tokenized_prompt,\n", + " prng_key=key,\n", + " params=params,\n", + " top_k=top_k,\n", + " top_p=top_p,\n", + " temperature=temperature,\n", + " condition_scale=condition_scale,\n", + " )\n", + "\n", + "\n", + "# decode images\n", + "@partial(jax.pmap, axis_name=\"batch\")\n", + "def p_decode(indices, params):\n", + " return vqgan.decode_code(indices, params=params)\n", + "\n", + "\n", + "# score images\n", + "@partial(jax.pmap, axis_name=\"batch\")\n", + "def p_clip(inputs, params):\n", + " logits = clip(params=params, **inputs).logits_per_image\n", + " return logits" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "HmVN6IBwapBA" + }, + "source": [ + "Keys are passed to the model on each device to generate unique inference per device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "4CTXmlUkThhX" + }, + "outputs": [], + "source": [ + "import random\n", + "\n", + "# create a random key\n", + "seed = random.randint(0, 2**32 - 1)\n", + "key = jax.random.PRNGKey(seed)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BrnVyCo81pij" + }, + "source": [ + "## 🖍 Text Prompt" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "rsmj0Aj5OQox" + }, + "source": [ + "Our model requires processing prompts." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "YjjhUychOVxm" + }, + "outputs": [], + "source": [ + "from dalle_mini import DalleBartProcessor\n", + "\n", + "processor = DalleBartProcessor.from_pretrained(DALLE_MODEL, revision=DALLE_COMMIT_ID)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "BQ7fymSPyvF_" + }, + "source": [ + "Let's define a text prompt." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "x_0vI9ge1oKr" + }, + "outputs": [], + "source": [ + "prompt = \"sunset over the lake in the mountains\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VKjEZGjtO49k" + }, + "outputs": [], + "source": [ + "tokenized_prompt = processor([prompt])" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "-CEJBnuJOe5z" + }, + "source": [ + "Finally we replicate it onto each device." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "lQePgju5Oe5z" + }, + "outputs": [], + "source": [ + "tokenized_prompt = replicate(tokenized_prompt)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "phQ9bhjRkgAZ" + }, + "source": [ + "## 🎨 Generate images\n", + "\n", + "We generate images using dalle-mini model and decode them with the VQGAN." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "d0wVkXpKqnHA" + }, + "outputs": [], + "source": [ + "# number of predictions\n", + "n_predictions = 32\n", + "\n", + "# We can customize top_k/top_p used for generating samples\n", + "gen_top_k = None\n", + "gen_top_p = None\n", + "temperature = 0.85\n", + "cond_scale = 3.0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "SDjEx9JxR3v8" + }, + "outputs": [], + "source": [ + "from flax.training.common_utils import shard_prng_key\n", + "import numpy as np\n", + "from PIL import Image\n", + "from tqdm.notebook import trange\n", + "\n", + "# generate images\n", + "images = []\n", + "for i in trange(n_predictions // jax.device_count()):\n", + " # get a new key\n", + " key, subkey = jax.random.split(key)\n", + " # generate images\n", + " encoded_images = p_generate(\n", + " tokenized_prompt,\n", + " shard_prng_key(subkey),\n", + " model.params,\n", + " gen_top_k,\n", + " gen_top_p,\n", + " temperature,\n", + " cond_scale,\n", + " )\n", + " # remove BOS\n", + " encoded_images = encoded_images.sequences[..., 1:]\n", + " # decode images\n", + " decoded_images = p_decode(encoded_images, vqgan.params)\n", + " decoded_images = decoded_images.clip(0.0, 1.0).reshape((-1, 256, 256, 3))\n", + " for img in decoded_images:\n", + " images.append(Image.fromarray(np.asarray(img * 255, dtype=np.uint8)))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "tw02wG9zGmyB" + }, + "source": [ + "Let's calculate their score with CLIP." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "FoLXpjCmGpju" + }, + "outputs": [], + "source": [ + "from flax.training.common_utils import shard\n", + "\n", + "# get clip scores\n", + "clip_inputs = clip_processor(\n", + " text=[prompt] * jax.device_count(),\n", + " images=images,\n", + " return_tensors=\"np\",\n", + " padding=\"max_length\",\n", + " max_length=77,\n", + " truncation=True,\n", + ").data\n", + "logits = p_clip(shard(clip_inputs), clip.params)\n", + "logits = logits.squeeze().flatten()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "4AAWRm70LgED" + }, + "source": [ + "Let's display images ranked by CLIP score." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "zsgxxubLLkIu" + }, + "outputs": [], + "source": [ + "print(f\"Prompt: {prompt}\\n\")\n", + "for idx in logits.argsort()[::-1]:\n", + " display(images[idx])\n", + " print(f\"Score: {logits[idx]:.2f}\\n\")" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "include_colab_link": true, + "machine_shape": "hm", + "name": "DALL·E mini - Inference pipeline.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.7" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/tools/train/config/medium/config.json b/tools/train/config/medium/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a5b7089b2e88b52b2d1a6febe7042835cbb715d2 --- /dev/null +++ b/tools/train/config/medium/config.json @@ -0,0 +1,31 @@ +{ + "activation_dropout": 0.0, + "activation_function": "gelu", + "attention_dropout": 0.0, + 
"bos_token_id": 16385, + "d_model": 1408, + "decoder_attention_heads": 16, + "decoder_ffn_dim": 4096, + "decoder_layerdrop": 0.0, + "decoder_layers": 14, + "decoder_start_token_id": 16384, + "dropout": 0.0, + "encoder_attention_heads": 16, + "encoder_ffn_dim": 4096, + "encoder_layerdrop": 0.0, + "encoder_layers": 14, + "encoder_vocab_size": 50264, + "eos_token_id": 16385, + "gradient_checkpointing": false, + "image_length": 256, + "image_vocab_size": 16384, + "init_std": 0.01, + "is_encoder_decoder": true, + "max_text_length": 64, + "model_type": "dallebart", + "normalize_text": true, + "pad_token_id": 16385, + "scale_embedding": false, + "tie_word_embeddings": false, + "use_cache": true +} diff --git a/tools/train/config/mega/config.json b/tools/train/config/mega/config.json new file mode 100644 index 0000000000000000000000000000000000000000..ab3b3e3df19946035ad0e0ab5f8822f5b9f404df --- /dev/null +++ b/tools/train/config/mega/config.json @@ -0,0 +1,30 @@ +{ + "activation_dropout": 0.0, + "activation_function": "gelu", + "attention_dropout": 0.0, + "bos_token_id": 16385, + "d_model": 2048, + "decoder_attention_heads": 32, + "decoder_ffn_dim": 8192, + "decoder_layerdrop": 0.0, + "decoder_layers": 24, + "decoder_start_token_id": 16384, + "dropout": 0.0, + "encoder_attention_heads": 32, + "encoder_ffn_dim": 8192, + "encoder_layerdrop": 0.0, + "encoder_layers": 24, + "encoder_vocab_size": 50264, + "eos_token_id": 16385, + "image_length": 256, + "image_vocab_size": 16391, + "init_std": 0.01, + "is_encoder_decoder": true, + "max_text_length": 64, + "model_type": "dallebart", + "normalize_text": true, + "pad_token_id": 16385, + "scale_embedding": false, + "tie_word_embeddings": false, + "use_cache": true +} diff --git a/tools/train/config/micro/config.json b/tools/train/config/micro/config.json new file mode 100644 index 0000000000000000000000000000000000000000..a296e52670eec721c19b40cbcb405c62bf6a8368 --- /dev/null +++ b/tools/train/config/micro/config.json @@ -0,0 +1,30 
@@ +{ + "activation_dropout": 0.0, + "activation_function": "gelu", + "attention_dropout": 0.0, + "bos_token_id": 16385, + "d_model": 256, + "decoder_attention_heads": 2, + "decoder_ffn_dim": 256, + "decoder_layerdrop": 0.0, + "decoder_layers": 2, + "decoder_start_token_id": 16384, + "dropout": 0.0, + "encoder_attention_heads": 2, + "encoder_ffn_dim": 256, + "encoder_layerdrop": 0.0, + "encoder_layers": 2, + "encoder_vocab_size": 50264, + "eos_token_id": 16385, + "image_length": 256, + "image_vocab_size": 16391, + "init_std": 0.02, + "is_encoder_decoder": true, + "max_text_length": 64, + "model_type": "dallebart", + "normalize_text": true, + "pad_token_id": 16385, + "scale_embedding": false, + "tie_word_embeddings": false, + "use_cache": true +} diff --git a/tools/train/config/mini/config.json b/tools/train/config/mini/config.json new file mode 100644 index 0000000000000000000000000000000000000000..febef33a46ae315ba940103495991656708ca15b --- /dev/null +++ b/tools/train/config/mini/config.json @@ -0,0 +1,29 @@ +{ + "activation_dropout": 0.0, + "activation_function": "gelu", + "attention_dropout": 0.0, + "bos_token_id": 16385, + "d_model": 1024, + "decoder_attention_heads": 16, + "decoder_ffn_dim": 4096, + "decoder_layers": 12, + "decoder_start_token_id": 16384, + "dropout": 0.0, + "encoder_attention_heads": 16, + "encoder_ffn_dim": 4096, + "encoder_layers": 12, + "encoder_vocab_size": 50264, + "eos_token_id": 16385, + "gradient_checkpointing": false, + "image_length": 256, + "image_vocab_size": 16384, + "init_std": 0.02, + "is_encoder_decoder": true, + "max_text_length": 64, + "model_type": "dallebart", + "normalize_text": true, + "pad_token_id": 16385, + "scale_embedding": false, + "tie_word_embeddings": false, + "use_cache": true +} diff --git a/tools/train/config/mini_glu/config.json b/tools/train/config/mini_glu/config.json new file mode 100644 index 0000000000000000000000000000000000000000..56388e6559773d8cb77e5d7cc92ead52138ab745 --- /dev/null +++ 
b/tools/train/config/mini_glu/config.json @@ -0,0 +1,29 @@ +{ + "activation_dropout": 0.0, + "activation_function": "gelu", + "attention_dropout": 0.0, + "bos_token_id": 16385, + "d_model": 1024, + "decoder_attention_heads": 16, + "decoder_ffn_dim": 2730, + "decoder_layers": 12, + "decoder_start_token_id": 16384, + "dropout": 0.0, + "encoder_attention_heads": 16, + "encoder_ffn_dim": 2730, + "encoder_layers": 12, + "encoder_vocab_size": 50264, + "eos_token_id": 16385, + "gradient_checkpointing": false, + "image_length": 256, + "image_vocab_size": 16384, + "init_std": 0.02, + "is_encoder_decoder": true, + "max_text_length": 64, + "model_type": "dallebart", + "normalize_text": true, + "pad_token_id": 16385, + "scale_embedding": false, + "tie_word_embeddings": false, + "use_cache": true +} diff --git a/tools/train/scalable_shampoo/README.md b/tools/train/scalable_shampoo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1b88e8d73e2e003b5ca63dff710e5b651217e75f --- /dev/null +++ b/tools/train/scalable_shampoo/README.md @@ -0,0 +1,7 @@ +# Notes + +Files copied from [google-research/scalable_shampoo/optax](https://github.com/google-research/google-research/tree/master/scalable_shampoo/optax). + +Imports have been modified to be relative. + +This will eventually be replaced with `optax-shampoo` package. diff --git a/tools/train/scalable_shampoo/distributed_shampoo.py b/tools/train/scalable_shampoo/distributed_shampoo.py new file mode 100644 index 0000000000000000000000000000000000000000..0eb228286cc7fddb4a800f901534abea53d8ceea --- /dev/null +++ b/tools/train/scalable_shampoo/distributed_shampoo.py @@ -0,0 +1,2267 @@ +# coding=utf-8 +# Copyright 2022 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# An implementation of distributed Shampoo optimizer from: +# +# Scalable Second Order Optimization for Deep Learning +# Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer +# Preprint Paper: https://arxiv.org/abs/2002.09018 +# +# This implementation moves computation of inverse pth root back to the +# accelerator (if higher precision is available). +# +# Authors: Rohan Anil (rohananil at google dot com) +# & Vineet Gupta (vineet at google dot com) +# +"""Distributed Shampoo Implementation.""" + +import enum +import functools +import itertools +from typing import Any, List, NamedTuple, Tuple + +import chex +import jax +import jax.experimental.pjit as pjit +import jax.numpy as jnp +import numpy as np +import optax +from flax import struct +from jax import lax + +from .quantization_utils import QuantizedValue +from .symmetric_matrices import symmetric_matrices + +# Dtype for inverse-pth root routine +# Switch to f64 if you have hardware that supports it. Enable the jax flag +# jax_enable_x64 for this to work, otherwise it will default to float32. +_MAT_INV_PTH_ROOT_DTYPE = jnp.float64 + + +@struct.dataclass +class TrainingMetrics: + inverse_pth_root_errors: chex.Array # Error for inverse-pth roots. + # TODO(rohananil): Add more important metrics to track during training. + + +# Per parameter optimizer state used in data-parallel training. 
+class ParameterStats(NamedTuple): + """State associated to each parameter of the model being trained.""" + + diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner + statistics: List[Any] # Statistics (QuantizedValue, chex.Array) + preconditioners: List[Any] # Preconditioners (QuantizedValue, chex.Array) + diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner + momentum: QuantizedValue # Momentum for the shampoo preconditioner + training_metrics: TrainingMetrics # Metrics (optional for training). + + +# For training extremely large model; We keep a global state with a concatenated +# statistics and preconditioner states for all vars. This is so that we can +# annotate the leading axis to be sharded to save memory at the cost of +# communication. +@struct.dataclass +class GlobalShardedParameterStats: + statistics: chex.Array # Statistics + preconditioners: chex.Array # Preconditioners + exponents: chex.Array # exponents + + +# These are per-parameter local states; All statistics here mirror the parameter +# Thus the sharding is copied over from the param specification. +@struct.dataclass +class LocalShardedParameterStats: + """State associated to each parameter of the model being trained.""" + + diagonal_statistics: QuantizedValue # Accumulator for diagonal preconditioner + diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner + momentum: QuantizedValue # Momentum for the shampoo preconditioner + training_metrics: TrainingMetrics # Metrics (optional for training). + index_start: np.int32 = struct.field( + pytree_node=False + ) # Index into global statistics array + sizes: Any = struct.field(pytree_node=False) # Sizes of the statistics. + + +def init_training_metrics(num_statistics): + # Since the downstream apis expect a jnp.array - we create a dummy one if + # num_statistics=0. 
+ n = 1 if not num_statistics else num_statistics + return TrainingMetrics(jnp.zeros([n], jnp.float32)) + + +def init_training_metrics_shapes(num_statistics): + # Since the downstream apis expect a jnp.array - we create a dummy one if + # num_statistics=0. + n = 1 if not num_statistics else num_statistics + return TrainingMetrics([[n], jnp.float32]) + + +def init_training_metrics_pspec(): + return TrainingMetrics(pjit.PartitionSpec()) + + +class ShardedShampooStats(NamedTuple): + """Shampoo state in sharded mode.""" + + global_stats: Any + local_stats: Any + + +class ShampooState(NamedTuple): + count: chex.Array + stats: Any + + +class InitFnState(NamedTuple): + init_fn: Any + pspec_fn: Any + shape_and_dtype_fn: Any + + +class GraftingType(enum.IntEnum): + SGD = 1 + ADAGRAD = 2 + RMSPROP = 3 + RMSPROP_NORMALIZED = 4 + SQRT_N = 5 + ADAGRAD_NORMALIZED = 6 + + +def power_iteration( + matrix, + num_iters=100, + error_tolerance=1e-6, + precision=lax.Precision.HIGHEST, +): + r"""Power iteration algorithm. + + The power iteration algorithm takes a symmetric PSD matrix `A`, and produces + a scalar `\lambda` , which is the greatest (in absolute value) eigenvalue + of `A`, and a vector v, which is the corresponding eigenvector of `A`. + + References: + [Wikipedia, 2021](https://en.wikipedia.org/wiki/Power_iteration) + + Args: + matrix: the symmetric PSD matrix. + num_iters: Number of iterations. + error_tolerance: Iterative exit condition. 
+ precision: precision XLA related flag, the available options are: a) + lax.Precision.DEFAULT (better step time, but not precise) b) + lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST + (best possible precision, slowest) + + Returns: + eigen vector, eigen value + """ + matrix_size = matrix.shape[-1] + + def _iter_condition(state): + i, unused_v, unused_s, unused_s_v, run_step = state + return jnp.logical_and(i < num_iters, run_step) + + def _iter_body(state): + """One step of power iteration.""" + i, new_v, s, s_v, unused_run_step = state + new_v = new_v / jnp.linalg.norm(new_v) + + s_v = jnp.einsum("ij,j->i", matrix, new_v, precision=precision) + s_new = jnp.einsum("i,i->", new_v, s_v, precision=precision) + return ( + i + 1, + s_v, + s_new, + s_v, + jnp.greater(jnp.abs(s_new - s), error_tolerance), + ) + + # Figure out how to use step as seed for random. + v_0 = ( + np.random.RandomState(1729).uniform(-1.0, 1.0, matrix_size).astype(matrix.dtype) + ) + + init_state = tuple([0, v_0, jnp.zeros([], dtype=matrix.dtype), v_0, True]) + _, v_out, s_out, _, _ = lax.while_loop(_iter_condition, _iter_body, init_state) + v_out = v_out / jnp.linalg.norm(v_out) + return v_out, s_out + + +def mat_power( + mat_m, + p, + precision=lax.Precision.HIGHEST, +): + """A simple matrix power method. 
M^p where p can be TracedValue.""" + power = jnp.eye(mat_m.shape[0], dtype=_MAT_INV_PTH_ROOT_DTYPE) + + def _iter_condition(state): + i, _, _ = state + return i > 0 + + def _iter_body(state): + i, power, mat = state + + power = jax.lax.cond( + i % 2 == 1, + lambda: jnp.matmul(mat, power, precision=precision), + lambda: power, + ) + i //= 2 + mat = jnp.matmul(mat, mat, precision=precision) + return i, power, mat + + _, result, _ = lax.while_loop(_iter_condition, _iter_body, (p, power, mat_m)) + return result + + +def matrix_inverse_pth_root( + matrix, + p, + num_iters=100, + ridge_epsilon=1e-6, + error_tolerance=1e-6, + precision=lax.Precision.HIGHEST, +): + """Computes `matrix^(-1/p)`, where `p` is a positive integer. + + This function uses the Coupled newton iterations algorithm for + the computation of a matrix's inverse pth root. + + + References: + [Functions of Matrices, Theory and Computation, + Nicholas J Higham, Pg 184, Eq 7.18]( + https://epubs.siam.org/doi/book/10.1137/1.9780898717778) + + Args: + matrix: the symmetric PSD matrix whose power it to be computed + p: exponent, for p a positive integer. + num_iters: Maximum number of iterations. + ridge_epsilon: Ridge epsilon added to make the matrix positive definite. + error_tolerance: Error indicator, useful for early termination. + precision: precision XLA related flag, the available options are: a) + lax.Precision.DEFAULT (better step time, but not precise) b) + lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST + (best possible precision, slowest) + + Returns: + matrix^(-1/p) + """ + + # If the input is not square, materialize it from the concatenated form. + if matrix.shape[0] != matrix.shape[1]: + matrix = symmetric_matrices.materialize_matrix_from_concat(matrix) + + assert matrix.shape[0] == matrix.shape[1] + + # We use _MAT_INV_PTH_ROOT_DTYPE for the matrix inverse pth root. + # Switch to f64 if you have hardware that supports it. 
Enable the jax flag + # jax_enable_x64 for this to work. + matrix_size = matrix.shape[0] + orig_dtype = matrix.dtype + matrix = matrix.astype(_MAT_INV_PTH_ROOT_DTYPE) + alpha = jnp.asarray(-1.0 / p, _MAT_INV_PTH_ROOT_DTYPE) + identity = jnp.eye(matrix_size, dtype=_MAT_INV_PTH_ROOT_DTYPE) + _, max_ev = power_iteration( + matrix=matrix, num_iters=100, error_tolerance=1e-6, precision=precision + ) + ridge_epsilon = ridge_epsilon * jnp.maximum(max_ev, 1e-6) + + def _iter_condition(state): + (i, unused_mat_m, unused_mat_h, unused_old_mat_h, error, run_step) = state + error_above_threshold = jnp.logical_and(error > error_tolerance, run_step) + return jnp.logical_and(i < num_iters, error_above_threshold) + + def _iter_body(state): + (i, mat_m, mat_h, unused_old_mat_h, error, unused_run_step) = state + mat_m_i = (1 - alpha) * identity + alpha * mat_m + new_mat_m = jnp.matmul(mat_power(mat_m_i, p), mat_m, precision=precision) + new_mat_h = jnp.matmul(mat_h, mat_m_i, precision=precision) + new_error = jnp.max(jnp.abs(new_mat_m - identity)) + # sometimes error increases after an iteration before decreasing and + # converging. 1.2 factor is used to bound the maximal allowed increase. 
+ return (i + 1, new_mat_m, new_mat_h, mat_h, new_error, new_error < error * 1.2) + + if matrix_size == 1: + resultant_mat_h = (matrix + ridge_epsilon) ** alpha + error = 0 + else: + damped_matrix = matrix + ridge_epsilon * identity + + z = (1 + p) / (2 * jnp.linalg.norm(damped_matrix)) + new_mat_m_0 = damped_matrix * z + new_error = jnp.max(jnp.abs(new_mat_m_0 - identity)) + new_mat_h_0 = identity * jnp.power(z, 1.0 / p) + init_state = tuple([0, new_mat_m_0, new_mat_h_0, new_mat_h_0, new_error, True]) + _, mat_m, mat_h, old_mat_h, error, convergence = lax.while_loop( + _iter_condition, _iter_body, init_state + ) + error = jnp.max(jnp.abs(mat_m - identity)).astype(jnp.float32) + is_converged = jnp.asarray(convergence, old_mat_h.dtype) + resultant_mat_h = is_converged * mat_h + (1 - is_converged) * old_mat_h + resultant_mat_h = jnp.asarray(resultant_mat_h, orig_dtype) + return resultant_mat_h, error + + +def merge_small_dims(shape_to_merge, max_dim): + """Merge small dimensions. + + If there are some small dimensions, we collapse them: + e.g. [1, 2, 512, 1, 2048, 1, 3, 4] --> [1024, 2048, 12] if max_dim = 1024 + [1, 2, 768, 1, 2048] --> [2, 768, 2048] + + Args: + shape_to_merge: Shape to merge small dimensions. + max_dim: Maximal dimension of output shape used in merging. + + Returns: + Merged shape. + """ + if shape_to_merge and np.all(np.array(shape_to_merge) == 1): + return [1] + + resulting_shape = [] + product = 1 + for d in shape_to_merge: + if product * d <= max_dim: + product *= d + else: + if product > 1: + resulting_shape.append(product) + product = d + if product > 1: + resulting_shape.append(product) + return resulting_shape + + +def pad_square_matrix(mat, max_size): + """Pad a square matrix up to max_size. + + Args: + mat: a matrix to pad. + max_size: matrix size requested. 
+ + Returns: + Given M returns [[M, 0], [0, I]] + """ + rows, cols = mat.shape + if rows != cols: + raise ValueError( + "Must have rows == cols, instead got " f"rows={rows}, cols={cols}" + ) + if cols > max_size: + raise ValueError( + "Must have cols <= max_size. Instead got " + f"cols={cols}, max_size={max_size}." + ) + if rows == max_size: + return mat + pad_size = max_size - rows + + zs1 = jnp.zeros([rows, pad_size], dtype=mat.dtype) + zs2 = jnp.zeros([pad_size, rows], dtype=mat.dtype) + eye = jnp.eye(pad_size, dtype=mat.dtype) + mat = jnp.concatenate([mat, zs1], 1) + mat = jnp.concatenate([mat, jnp.concatenate([zs2, eye], 1)], 0) + return mat + + +def make_sliced_padding( + symmetric_block_size, + num_blocks, + starting_block, + dtype, +): + """Returns padding for symmetric block matrix. + + Specifically, the padding is given concatenated rectangular matrices + representing the lower-triangular rows below the starting block. For example, + if we want to pad the symmetric matrix + + M = [[A, B^T] + [B, C]], + + the desired output (in terms of the full matrix) with num_blocks = 4 is + + M_padded = [[A, B^T, 0, 0] + [B, C, 0, 0] + [0, 0, I, 0] + 0, 0, 0, I]. + + We would represent M as the block matrix mat = [A, B, C]. In this form, the + additional padding to provide has form [0, 0, I, 0, 0, 0, I] (only the lower + triangular parts in the third and fourth rows). + + Args: + symmetric_block_size: The size of each block. + num_blocks: The total number of blocks. + starting_block: The block where to start the padding. + dtype: The type to use for the blocks. 
+ """ + if starting_block == num_blocks: + return jnp.zeros(shape=(symmetric_block_size, 0), dtype=dtype) + + blocks = [] + for i in range(starting_block, num_blocks): + blocks.append( + jnp.zeros( + shape=(symmetric_block_size, symmetric_block_size * i), dtype=dtype + ) + ) + blocks.append(jnp.eye(symmetric_block_size, dtype=dtype)) + return jnp.concatenate(blocks, axis=-1) + + +def pad_block_symmetric_matrix( + mat, + symmetric_block_size, + max_num_blocks, +): + """Returns the padded blocked symmetric matrix. + + The size of the padded matrix will be: + [symmetric_block_size, symmetric_block_size * max_num_blocks] + + The input matrix can either: + - Be square with size less or equal to symmetric_block_size. In this case, + mat will first be padded to a square matrix of size symmetric_block_size, + and then be padded again up to the full size of the blocked matrix. + - Be a rectangle with number of rows equal to block size. + In this case, number of columns must be a multiple of number of rows, and + the ratio must correspond to a block representation of a symmetric matrix. + That is, the ratio must have form x * (x + 1) / 2. Here, x represents the + number of block rows represented by the matrix. + + Args: + mat: The input block matrix. + symmetric_block_size: The size of blocks. + max_num_blocks: The largest number of blocks to pad to. + """ + rows, cols = mat.shape + if rows > symmetric_block_size: + raise ValueError( + "Must have rows <= symmetric_block_size. Instead got " + f"rows={rows}, symmetric_block_size={symmetric_block_size}." + ) + if rows > cols: + raise ValueError( + "Must have rows <= cols, instead got " f"rows={rows}, cols={cols}." + ) + if cols > symmetric_block_size * max_num_blocks: + raise ValueError( + "Must have cols <= symmetric_block_size * max_num_blocks " + f"Instead got cols={cols}, " + f"symmetric_block_size={symmetric_block_size}, " + f"max_num_blocks={max_num_blocks}." 
+ ) + if rows < symmetric_block_size: + mat = pad_square_matrix(mat, max_size=symmetric_block_size) + # Update rows and cols after possibly padding in pad_square_matrix. + rows, cols = mat.shape + assert rows == symmetric_block_size + assert cols % rows == 0 + filled_blocks = cols // rows + padding_blocks = make_sliced_padding( + symmetric_block_size=symmetric_block_size, + num_blocks=symmetric_matrices.num_blocks_from_total_blocks(max_num_blocks), + starting_block=symmetric_matrices.num_blocks_from_total_blocks(filled_blocks), + dtype=mat.dtype, + ) + return jnp.concatenate([mat, padding_blocks], axis=-1) + + +def pad_vector(vec, max_size): + """Pad a vector to a max_size. + + Args: + vec: a vector to pad. + max_size: matrix size requested. + + Returns: + Given V returns [V, 0] + """ + size = vec.shape[0] + assert size <= max_size + if size == max_size: + return vec + pad_size = max_size - size + zs1 = jnp.zeros([pad_size], dtype=vec.dtype) + return jnp.concatenate([vec, zs1], 0) + + +def efficient_cond(predicate, compute_fn, init_state, *args, **kwargs): + """Avoids wasteful buffer allocation with XLA.""" + + def _iter_body(unused_state): + results = compute_fn(*args, **kwargs) + return tuple([False] + list(results)) + + def _iter_condition(state): + return state[0] + + results = jax.lax.while_loop( + _iter_condition, _iter_body, tuple([predicate] + init_state) + ) + return tuple(results[1:]) + + +class BlockPartitioner: + """Partitions a tensor into smaller tensors.""" + + def __init__(self, param, block_size): + self._shape = param.shape + self._splits = [] + split_sizes = [] + # We split params into smaller blocks. Here we store the metadata to make + # that split. + for i, d in enumerate(param.shape): + if 0 < block_size < d: + # d-1, otherwise split appends a 0-size array. 
+ nsplit = (d - 1) // block_size + indices = (np.arange(nsplit, dtype=np.int32) + 1) * block_size + sizes = np.ones(nsplit + 1, dtype=np.int32) * block_size + sizes[-1] = d - indices[-1] + self._splits.append((i, indices)) + split_sizes.append(sizes) + else: + split_sizes.append(np.array([d], dtype=np.int32)) + self._num_splits = len(split_sizes) + self._preconditioner_shapes = [] + for t in itertools.product(*split_sizes): + self._preconditioner_shapes.extend([[d, d] for d in t]) + + def shapes_for_preconditioners(self): + return self._preconditioner_shapes + + def num_splits(self): + return self._num_splits + + def partition(self, tensor): + """Partition tensor into blocks.""" + + assert tensor.shape == self._shape + tensors = [tensor] + for (i, indices) in self._splits: + tensors_local = [] + for t in tensors: + tensors_local.extend(jnp.split(t, indices_or_sections=indices, axis=i)) + tensors = tensors_local + return tensors + + def merge_partitions(self, partitions): + """Merge partitions back to original shape.""" + + for (i, indices) in reversed(self._splits): + n = len(indices) + 1 + partial_merged_tensors = [] + ind = 0 + while ind < len(partitions): + partial_merged_tensors.append( + jnp.concatenate(partitions[ind : ind + n], axis=i) + ) + ind += n + partitions = partial_merged_tensors + assert len(partitions) == 1 + return partitions[0] + + +class Preconditioner: + """Compute statistics/shape from gradients for preconditioning.""" + + def __init__(self, param, block_size, best_effort_shape_interpretation): + self._original_shape = param.shape + self._transformed_shape = param.shape + if best_effort_shape_interpretation: + self._transformed_shape = merge_small_dims(self._original_shape, block_size) + reshaped_param = jnp.reshape(param, self._transformed_shape) + self._partitioner = BlockPartitioner(reshaped_param, block_size) + + def statistics_from_grad(self, grad): + """Compute statistics from gradients. 
+ + Args: + grad: Gradient to compute statistics from. + + Returns: + A list of gradient statistics for each partition. + """ + reshaped_grad = jnp.reshape(grad, self._transformed_shape) + partitioned_grads = self._partitioner.partition(reshaped_grad) + stats = [] + for g in partitioned_grads: + g_stats = [] + rank = len(g.shape) + for i in range(rank): + axes = list(range(i)) + list(range(i + 1, rank)) + stat = jnp.tensordot(g, g, axes=(axes, axes)) + g_stats.append(stat) + stats.extend(g_stats) + return stats + + def shapes_for_preconditioners(self): + """Returns shape from statistics.""" + return self._partitioner.shapes_for_preconditioners() + + def exponent_for_preconditioner(self): + """Returns exponent to use for inverse-pth root M^{-1/p}.""" + return 2 * len(self._transformed_shape) + + def preconditioned_grad(self, grad, preconditioners): + """Precondition the gradient. + + Args: + grad: A gradient tensor to precondition. + preconditioners: A list of preconditioners to apply. + + Returns: + A preconditioned gradient. 
+ """ + + reshaped_grad = jnp.reshape(grad, self._transformed_shape) + partitioned_grads = self._partitioner.partition(reshaped_grad) + preconditioned_partitioned_grads = [] + num_splits = self._partitioner.num_splits() + for i, g in enumerate(partitioned_grads): + preconditioners_for_grad = preconditioners[ + i * num_splits : (i + 1) * num_splits + ] + rank = len(g.shape) + precond_g = g + for j in range(rank): + precond_g = jnp.tensordot( + precond_g, preconditioners_for_grad[j], axes=[[0], [0]] + ) + preconditioned_partitioned_grads.append(precond_g) + merged_grad = self._partitioner.merge_partitions( + preconditioned_partitioned_grads + ) + return jnp.reshape(merged_grad, self._original_shape) + + +def _convert_to_parameter_stats(global_stats, local_stat): + """Creates parameter stats from sharded stats.""" + index_start = int(local_stat.index_start) + index_end = int(len(local_stat.sizes)) + index_start + statistics = global_stats.statistics[index_start:index_end, :, :] + preconditioners = global_stats.preconditioners[index_start:index_end, :, :] + new_statistics = [] + new_preconditioners = [] + for i, size in enumerate(local_stat.sizes): + new_statistics.append(statistics[i][:size, :size]) + new_preconditioners.append(preconditioners[i][:size, :size]) + return ParameterStats( + local_stat.diagonal_statistics, + new_statistics, + new_preconditioners, + local_stat.diagonal_momentum, + local_stat.momentum, + local_stat.training_metrics, + ) + + +def _convert_from_parameter_stats(parameter_stats, local_stats): + """Creates sharded stats from paramter stats.""" + return LocalShardedParameterStats( + parameter_stats.diagonal_statistics, + parameter_stats.diagonal_momentum, + parameter_stats.momentum, + parameter_stats.training_metrics, + local_stats.index_start, + local_stats.sizes, + ) + + +def _add_error_into_local_stats(local_stats, errors, inverse_failure_threshold): + """Adds errors back into local statistics.""" + new_local_stats = [] + for local_stat in 
local_stats: + index_start = int(local_stat.index_start) + index_end = int(len(local_stat.sizes)) + index_start + per_stat_error = errors[index_start:index_end] + if local_stat.sizes: + per_stat_error = jnp.where( + jnp.logical_and( + per_stat_error > 0.0, per_stat_error != inverse_failure_threshold + ), + per_stat_error, + local_stat.training_metrics.inverse_pth_root_errors, + ) + new_local_stats.append( + LocalShardedParameterStats( + local_stat.diagonal_statistics, + local_stat.diagonal_momentum, + local_stat.momentum, + TrainingMetrics(per_stat_error), + local_stat.index_start, + local_stat.sizes, + ) + ) + return new_local_stats + + +def batch(x, num_devices): + """Batch `x` so that so that leading axis is num_devices.""" + n = len(x) + b = int(n / num_devices) + return jnp.stack([jnp.stack(x[idx : idx + b]) for idx in range(0, n, b)]) + + +def unbatch(batched_values): + """Unbatch values across leading axis and return a list of elements.""" + b1, b2 = batched_values.shape[0], batched_values.shape[1] + results = [] + for v_array in jnp.split(batched_values, indices_or_sections=b1, axis=0): + v_array = jnp.squeeze(v_array) + # b2 = batches (number of preconditioner computation) per core. + if b2 > 1: + for v in jnp.split(v_array, indices_or_sections=b2, axis=0): + results.append(jnp.squeeze(v)) + else: + results.append(v_array) + return results + + +def distributed_shampoo( + learning_rate, + block_size, + beta1=0.9, + beta2=0.999, + diagonal_epsilon=1e-10, + matrix_epsilon=1e-6, + weight_decay=0.0, + start_preconditioning_step=5, + preconditioning_compute_steps=1, + statistics_compute_steps=1, + best_effort_shape_interpretation=True, + graft_type=GraftingType.SGD, + nesterov=True, + exponent_override=0, + # Pass pmap 'batch axis name' in pmap mode. + batch_axis_name=None, + ### Only set following 3 params in pjit/spmd mode. 
+ ### WARNING: Experimental + statistics_partition_spec=None, + preconditioner_partition_spec=None, + num_devices_for_pjit=None, + shard_optimizer_states=False, + ### + ### Experimental memory reduction mode + best_effort_memory_usage_reduction=False, + ### + inverse_failure_threshold=0.1, + moving_average_for_momentum=False, + skip_preconditioning_dim_size_gt=4096, + clip_by_scaled_gradient_norm=None, + precision=lax.Precision.HIGHEST, +): + """Distributed Shampoo optimizer. + + Distributed Shampoo is a second-order preconditioned method (concretely, a + variant of full-matrix Adagrad), that provides significant convergence and + wall-clock time improvements compared to conventional first-order methods, + and that has been shown to scale to large state-of-the-art deep learning + models. + + References: + Scalable Second Order Optimization for Deep Learning, + Rohan Anil, Vineet Gupta, Tomer Koren, Kevin Regan, Yoram Singer + + Preprint: https://arxiv.org/abs/2002.09018 + + Args: + learning_rate: the step size used to update the parameters. + block_size: Block size for large layers (if > 0). Preconditioning compute + operation is cubic in the dimension of the tensor. Block size allows us to + chunk the layers into sub-layers of maximal dimension dictated by this + value. Use 128 as default (increase if you have compute budget). + beta1: momentum parameter. + beta2: second moment averaging parameter. + diagonal_epsilon: epsilon for diagonal adagrad (only if layerwise grafting + to AdaGrad is enabled). + matrix_epsilon: epsilon to add to statistics before computing inverse pth + root. If you are running in f32 precision for inverse pth root + (recommended today) this can go upto 1e-6. If you have latest hardware + with native f64 precision, set this upto 1e-12. + weight_decay: Weight decay for regularization. + start_preconditioning_step: When to start Shampoo update before which + diagonal update is used. 
This is because we don't have enough information to do a stable inverse.
+ precision: precision XLA related flag, the available options are: a) + lax.Precision.DEFAULT (better step time, but not precise) b) + lax.Precision.HIGH (increased precision, slower) c) lax.Precision.HIGHEST + (best possible precision, slowest) + + Returns: + a GradientTransformation. + """ + + def _graft_type_has_diagonal_statistics(): + """Returns True if using diagonal firt order method for grafting.""" + return graft_type != GraftingType.SGD and graft_type != GraftingType.SQRT_N + + def _graft_type_has_diagonal_momentum_states(): + """Returns False if using SQRT_N for grafting.""" + return graft_type != GraftingType.SQRT_N + + def quantized_dtype_for_momentum_buffers(): + return jnp.int8 if best_effort_memory_usage_reduction else jnp.float32 + + # TODO(rohananil): Explore int8-16 quantization with non-linear bucket sizes. + def quantized_dtype_for_diagonal_statistics_buffers(): + return jnp.float32 + + # Preconditioner and statistics are both stores as int16 in this mode. + # We take out the diagonal to make quantization easier. + def quantized_dtype_for_second_moment_statistics_buffers(): + return ( + jnp.int16 + if best_effort_memory_usage_reduction and batch_axis_name + else jnp.float32 + ) + + # Preconditioner and statistics are both stores as int16 in this mode. + # We take out the diagonal to make quantization easier. 
+ def quantized_dtype_for_second_moment_preconditioner_buffers(): + return ( + jnp.int16 + if best_effort_memory_usage_reduction and batch_axis_name + else jnp.float32 + ) + + def _to_float(maybe_quantized): + if isinstance(maybe_quantized, QuantizedValue): + return maybe_quantized.to_float() + else: + return maybe_quantized + + def _maybe_quantize_statistics(statistics_list): + return _maybe_quantize_matrices_with_dtype( + statistics_list, quantized_dtype_for_second_moment_statistics_buffers() + ) + + def _maybe_quantize_preconditioners(statistics_list): + return _maybe_quantize_matrices_with_dtype( + statistics_list, quantized_dtype_for_second_moment_preconditioner_buffers() + ) + + def _maybe_quantize_matrices_with_dtype(statistics_list, quantized_dtype): + if quantized_dtype != jnp.float32: + return [ + QuantizedValue.from_float_value( + s, quantized_dtype, extract_diagonal=True + ) + for s in statistics_list + ] + else: + return statistics_list + + def _maybe_dequantize_preconditioners(preconditioner_list): + return _maybe_dequantize_matrices_with_dtype( + preconditioner_list, + quantized_dtype_for_second_moment_preconditioner_buffers(), + ) + + def _maybe_dequantize_matrices_with_dtype(statistics_list, quantized_dtype): + if quantized_dtype != jnp.float32: + return [s.to_float() for s in statistics_list] + else: + return statistics_list + + def _quantize_diagonal_statistics(diagonal_statistics): + return QuantizedValue.from_float_value( + diagonal_statistics, quantized_dtype_for_diagonal_statistics_buffers() + ) + + def _quantize_momentum(momentum_statistics): + return QuantizedValue.from_float_value( + momentum_statistics, quantized_dtype_for_momentum_buffers() + ) + + def sharded_init_fn(params): + """Returns optimizer state (for PJIT mode). + + Args: + params: the parameters that should be updated. + """ + params_flat, treedef = jax.tree_flatten(params) + # Find max size to pad to. 
+ max_size = 0 + for param in params_flat: + preconditioner = Preconditioner( + param, block_size, best_effort_shape_interpretation + ) + if not _skip_preconditioning(param): + shapes = preconditioner.shapes_for_preconditioners() + sizes = [s[0] for s in shapes] + max_size = max(max(sizes), max_size) + + padded_statistics = [] + padded_preconditioners = [] + local_stats_flat = [] + exponents = [] + for param in params_flat: + preconditioner = Preconditioner( + param, block_size, best_effort_shape_interpretation + ) + shapes = preconditioner.shapes_for_preconditioners() + sizes = [] + + statistics = [] + preconditioners = [] + index_start = len(padded_statistics) + if not _skip_preconditioning(param): + sizes = [s[0] for s in shapes] + shapes = preconditioner.shapes_for_preconditioners() + statistics = [ + matrix_epsilon * jnp.eye(max_size, dtype=jnp.float32) + for s in shapes + ] + preconditioners = [jnp.eye(max_size, dtype=jnp.float32) for s in shapes] + padded_statistics.extend(statistics) + padded_preconditioners.extend(preconditioners) + exponent = ( + preconditioner.exponent_for_preconditioner() + if exponent_override == 0 + else exponent_override + ) + exponents.extend([exponent] * len(shapes)) + + diagonal_statistics = [] + if _graft_type_has_diagonal_statistics(): + diagonal_statistics = jnp.zeros_like(param) + + diagonal_momentum = _quantize_momentum([]) + momentum = _quantize_momentum(jnp.zeros_like(param)) + if _graft_type_has_diagonal_momentum_states(): + diagonal_momentum = _quantize_momentum((jnp.zeros_like(param))) + + local_stats_flat.append( + LocalShardedParameterStats( + _quantize_diagonal_statistics(diagonal_statistics), + diagonal_momentum, + momentum, + init_training_metrics(len(sizes)), + index_start, + sizes, + ) + ) + + local_stats = jax.tree_unflatten(treedef, local_stats_flat) + to_pad = -len(padded_statistics) % num_devices_for_pjit + if max_size == 0: + to_pad = num_devices_for_pjit + max_size = block_size + stat_dtype = jnp.float32 + 
else: + stat_dtype = padded_statistics[0].dtype + # Pad the statistics and preconditioner matrices to be a multiple of + # num devices. + # TODO(rohananil): Relax to only the size of the mesh axis where the dim + # is split on. + padded_statistics.extend( + [jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)] + ) + padded_preconditioners.extend( + [jnp.eye(max_size, dtype=stat_dtype) for _ in range(to_pad)] + ) + exponents.extend([1 for _ in range(to_pad)]) + global_stats = GlobalShardedParameterStats( + jnp.stack(padded_statistics), + jnp.stack(padded_preconditioners), + jnp.stack(exponents), + ) + return ShampooState( + count=jnp.zeros([], jnp.int32), + stats=ShardedShampooStats(global_stats, local_stats), + ) + + def _max_statistics_size_from_params(params): + max_size = 0 + for param in params: + param_clone = jnp.zeros(param.shape, dtype=param.dtype) + preconditioner = Preconditioner( + param_clone, block_size, best_effort_shape_interpretation + ) + if not _skip_preconditioning(param): + shapes = preconditioner.shapes_for_preconditioners() + sizes = [s[0] for s in shapes] + max_size = max(max(sizes), max_size) + return max_size + + def _remove_leading_sharding_annotation(pspec): + """Mapping from N-d to (N-1)-d, used for quantization, factoring etc.""" + # None and PSpec(None) are valid PSpecs. + if pspec and len(pspec) > 1: + return pjit.PartitionSpec(*pspec[1:]) + else: + return [] + + def sharded_init_partition_spec_fn( + params, params_partition_spec, partition_spec_for_statistics + ): + """Returns a parallel state tree with PartitionSpec associated with state. + + + Args: + params: A pytree with params. + params_partition_spec: A pytree with PartitionSpec for params. + partition_spec_for_statistics: PartitionSpec for the statistics. + """ + # Parallel lists of spec, and params. 
+ param_pspec_flat, _ = jax.tree_flatten( + params_partition_spec, is_leaf=lambda x: x is None + ) + params_flat, treedef = jax.tree_flatten(params) + assert param_pspec_flat + assert params_flat + # Step is replicated across cores. + # None means cores. + local_stats_flat = [] + num_statistics = 0 + for param, param_pspec in zip(params_flat, param_pspec_flat): + param_clone = jnp.zeros(param.shape, dtype=param.dtype) + preconditioner = Preconditioner( + param_clone, block_size, best_effort_shape_interpretation + ) + shapes = preconditioner.shapes_for_preconditioners() + sizes = [] + + index_start = num_statistics + if not _skip_preconditioning(param): + sizes = [s[0] for s in shapes] + shapes = preconditioner.shapes_for_preconditioners() + num_statistics += len(shapes) + + diagonal_statistics_pspec = [] + diagonal_statistics_scale_pspec = [] + if _graft_type_has_diagonal_statistics(): + # Identically shaped param. + diagonal_statistics_pspec = param_pspec + if quantized_dtype_for_diagonal_statistics_buffers() != jnp.float32: + diagonal_statistics_scale_pspec = ( + _remove_leading_sharding_annotation(param_pspec) + ) + + m1_pspec = [] + m1_scale_pspec = [] + if _graft_type_has_diagonal_momentum_states(): + m1_pspec = param_pspec + if quantized_dtype_for_momentum_buffers() != jnp.float32: + m1_scale_pspec = _remove_leading_sharding_annotation(m1_pspec) + + m2_pspec = param_pspec + m2_scale_pspec = [] + if quantized_dtype_for_momentum_buffers() != jnp.float32: + m2_scale_pspec = _remove_leading_sharding_annotation(m2_pspec) + + local_stats_flat.append( + LocalShardedParameterStats( + QuantizedValue( + diagonal_statistics_pspec, + [], + diagonal_statistics_scale_pspec, + quantized_dtype_for_diagonal_statistics_buffers(), + False, + list(param.shape), + ), + QuantizedValue( + m1_pspec, + [], + m1_scale_pspec, + quantized_dtype_for_momentum_buffers(), + False, + list(param.shape), + ), + QuantizedValue( + m2_pspec, + [], + m2_scale_pspec, + 
quantized_dtype_for_momentum_buffers(), + False, + list(param.shape), + ), + init_training_metrics_pspec(), + index_start, + sizes, + ) + ) + + local_stats = jax.tree_unflatten(treedef, local_stats_flat) + global_stats = GlobalShardedParameterStats( + partition_spec_for_statistics, + partition_spec_for_statistics, + pjit.PartitionSpec(), + ) + count_pspec = pjit.PartitionSpec() + return ShampooState( + count=count_pspec, stats=ShardedShampooStats(global_stats, local_stats) + ) + + def sharded_init_shape_and_dtype_fn(params): + """Returns a parallel state tree with shape, dtype associated with state. + + + Args: + params: A pytree with params. + """ + # Parallel lists of spec, and params. + params_flat, treedef = jax.tree_flatten(params) + assert params_flat + # Step is replicated across cores. + # None means cores. + local_stats_flat = [] + num_statistics = 0 + for param in params_flat: + param_clone = jnp.zeros(param.shape, dtype=param.dtype) + preconditioner = Preconditioner( + param_clone, block_size, best_effort_shape_interpretation + ) + shapes = preconditioner.shapes_for_preconditioners() + sizes = [] + + index_start = num_statistics + if not _skip_preconditioning(param): + sizes = [s[0] for s in shapes] + shapes = preconditioner.shapes_for_preconditioners() + num_statistics += len(shapes) + + diagonal_statistics_shape_and_dtype = [] + diagonal_statistics_scale_shape_and_dtype = [] + if _graft_type_has_diagonal_statistics(): + diagonal_statistics_shape_and_dtype = [list(param.shape), param.dtype] + qdtype = quantized_dtype_for_diagonal_statistics_buffers() + if qdtype != jnp.float32: + diagonal_statistics_shape_and_dtype = [list(param.shape), qdtype] + diagonal_statistics_scale_shape_and_dtype = [ + list(param.shape)[1:], + param.dtype, + ] + + qdtype = quantized_dtype_for_momentum_buffers() + m1_shape_and_dtype = [] + m1_scale_shape_and_dtype = [] + if _graft_type_has_diagonal_momentum_states(): + m1_shape_and_dtype = [list(param.shape), qdtype] + if 
quantized_dtype_for_momentum_buffers() != jnp.float32: + m1_scale_shape_and_dtype = [list(param.shape)[1:], qdtype] + + m2_shape_and_dtype = [list(param.shape), param.dtype] + m2_scale_shape_and_dtype = [] + if qdtype != jnp.float32: + m2_shape_and_dtype = [list(param.shape), qdtype] + m2_scale_shape_and_dtype = [list(param.shape)[1:], qdtype] + + local_stats_flat.append( + LocalShardedParameterStats( + QuantizedValue( + diagonal_statistics_shape_and_dtype, + [], + diagonal_statistics_scale_shape_and_dtype, + quantized_dtype_for_diagonal_statistics_buffers(), + False, + list(param.shape), + ), + QuantizedValue( + m1_shape_and_dtype, + [], + m1_scale_shape_and_dtype, + quantized_dtype_for_momentum_buffers(), + False, + list(param.shape), + ), + QuantizedValue( + m2_shape_and_dtype, + [], + m2_scale_shape_and_dtype, + quantized_dtype_for_momentum_buffers(), + False, + list(param.shape), + ), + init_training_metrics_shapes(len(sizes)), + index_start, + sizes, + ) + ) + + local_stats = jax.tree_unflatten(treedef, local_stats_flat) + max_statistics_size = _max_statistics_size_from_params(params_flat) + to_pad = -num_statistics % num_devices_for_pjit + num_statistics += to_pad + if num_statistics == 0: + num_statistics = num_devices_for_pjit + max_statistics_size = block_size + statistics_shape = [num_statistics, max_statistics_size, max_statistics_size] + global_stats = GlobalShardedParameterStats( + [statistics_shape, jnp.float32], + [statistics_shape, jnp.float32], + [[num_statistics], jnp.int32], + ) + return ShampooState( + count=[[], jnp.float32], + stats=ShardedShampooStats(global_stats, local_stats), + ) + + def sharded_update_fn(grads, state, params): + """Transform the input gradient and update all statistics in sharded mode. + + Args: + grads: the gradient tensors for the parameters. + state: a named tuple containing the state of the optimizer + params: the parameters that should be updated. 
+ + Returns: + A tuple containing the new parameters and the new optimizer state. + """ + params_flat, treedef = jax.tree_flatten(params) + grads_flat = treedef.flatten_up_to(grads) + + global_stats = state.stats.global_stats + local_stats_flat = treedef.flatten_up_to(state.stats.local_stats) + stats_flat = [ + _convert_to_parameter_stats(global_stats, local_stat) + for local_stat in local_stats_flat + ] + new_stats_flat = jax.tree_multimap( + lambda g, s, p: _compute_stats(g, s, p, state.count), + grads_flat, + stats_flat, + params_flat, + ) + + outputs = jax.tree_multimap( + lambda g, s, p: _transform_grad(g, s, p, state.count), + grads_flat, + new_stats_flat, + params_flat, + ) + updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ()) + + updates = jax.tree_unflatten(treedef, updates_flat) + # Create new local_stats + new_local_stats_flat = [ + _convert_from_parameter_stats(new_stat, local_stat) + for new_stat, local_stat in zip(new_stats_flat, local_stats_flat) + ] + + max_size = global_stats.statistics.shape[1] + new_padded_statistics = [] + for stat in new_stats_flat: + new_padded_statistics.extend( + [pad_square_matrix(stat, max_size) for stat in stat.statistics] + ) + + # Create global stats + # TODO(rohananil): Preconditioner is not updated every step, so cost of + # stack/pad can be obviated away. + # Pad the statistics and preconditioner matrices to be a multiple of + # num devices. + # TODO(rohananil): Relax to only the size of the mesh axis where the dim + # is split on. 
+ to_pad = -len(new_padded_statistics) % num_devices_for_pjit + new_padded_statistics.extend( + [ + jnp.eye(max_size, dtype=new_padded_statistics[0].dtype) + for _ in range(to_pad) + ] + ) + new_stacked_padded_statistics = jnp.stack(new_padded_statistics) + new_stacked_padded_statistics = pjit.with_sharding_constraint( + new_stacked_padded_statistics, statistics_partition_spec + ) + + def _internal_inverse_pth_root_all(): + preconditioners, errors = _matrix_inverse_pth_root_pjit( + new_stacked_padded_statistics, + global_stats.exponents, + statistics_partition_spec, + ) + return preconditioners, errors + + if preconditioning_compute_steps == 1: + new_preconditioners, errors = _internal_inverse_pth_root_all() + else: + # Passing statistics instead of preconditioners as they are similarly + # shaped tensors. Note statistics will be ignored as we are passing in + # a large init value for error. + preconditioners_init = new_stacked_padded_statistics + n = new_stacked_padded_statistics.shape[0] + errors_init = jnp.ones([n], jnp.float32) * inverse_failure_threshold + init_state = [preconditioners_init, errors_init] + perform_step = state.count % preconditioning_compute_steps == 0 + new_preconditioners, errors = efficient_cond( + perform_step, _internal_inverse_pth_root_all, init_state + ) + + new_local_stats_flat = _add_error_into_local_stats( + new_local_stats_flat, errors, inverse_failure_threshold + ) + new_local_stats = jax.tree_unflatten(treedef, new_local_stats_flat) + errors = errors.reshape((-1, 1, 1)) + predicate = jnp.logical_or( + jnp.isnan(errors), errors >= inverse_failure_threshold + ).astype(new_preconditioners.dtype) + # TODO(rohananil): Check for numerical instabilities. 
+ new_conditional_preconditioners = ( + predicate * global_stats.preconditioners + + (1.0 - predicate) * new_preconditioners + ) + new_global_stats = GlobalShardedParameterStats( + new_stacked_padded_statistics, + new_conditional_preconditioners, + global_stats.exponents, + ) + new_shampoo_state = ShampooState( + count=state.count + 1, + stats=ShardedShampooStats(new_global_stats, new_local_stats), + ) + return updates, new_shampoo_state + + def init_fn(params): + """Initialise the optimiser's state.""" + + def _init(param): + preconditioner = Preconditioner( + param, block_size, best_effort_shape_interpretation + ) + statistics = [] + preconditioners = [] + if not _skip_preconditioning(param): + shapes = preconditioner.shapes_for_preconditioners() + statistics = [ + matrix_epsilon * jnp.eye(s[0], dtype=jnp.float32) for s in shapes + ] + preconditioners = [jnp.eye(s[0], dtype=jnp.float32) for s in shapes] + + diagonal_statistics = [] + if _graft_type_has_diagonal_statistics(): + diagonal_statistics = jnp.zeros_like(param) + + diagonal_momentum = _quantize_momentum([]) + momentum = _quantize_momentum(jnp.zeros_like(param)) + if _graft_type_has_diagonal_momentum_states(): + diagonal_momentum = _quantize_momentum(jnp.zeros_like(param)) + + return ParameterStats( + _quantize_diagonal_statistics(diagonal_statistics), + _maybe_quantize_statistics(statistics), + _maybe_quantize_preconditioners(preconditioners), + diagonal_momentum, + momentum, + init_training_metrics(len(statistics)), + ) + + return ShampooState( + count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params) + ) + + def _skip_preconditioning(param): + return len(param.shape) < 1 or any( + [s > skip_preconditioning_dim_size_gt for s in param.shape] + ) + + def _compute_stats(grad, state, param, step): + """Compute per-parameter statistics.""" + preconditioner = Preconditioner( + param, block_size, best_effort_shape_interpretation + ) + new_statistics = [[]] * len(state.statistics) + w1 = beta2 + w2 = 
beta2 if beta2 == 1.0 else (1.0 - beta2) + if not _skip_preconditioning(param): + + def compute_updated_statistics(): + new_stats = preconditioner.statistics_from_grad(grad) + new_stats_accumulators = [] + for stat, stat_accumulator in zip(new_stats, state.statistics): + new_stats_accumulators.append( + w1 * _to_float(stat_accumulator) + w2 * stat + ) + return _maybe_quantize_statistics(new_stats_accumulators) + + if statistics_compute_steps > 1: + perform_step = step % statistics_compute_steps == 0 + init_state = state.statistics + new_statistics = list( + efficient_cond(perform_step, compute_updated_statistics, init_state) + ) + else: + new_statistics = compute_updated_statistics() + return ParameterStats( + state.diagonal_statistics, + new_statistics, + state.preconditioners, + state.diagonal_momentum, + state.momentum, + state.training_metrics, + ) + + def _matrix_inverse_pth_root_vmap(xs, ps): + mi_pth_root = functools.partial( + matrix_inverse_pth_root, ridge_epsilon=matrix_epsilon, precision=precision + ) + return jax.vmap(mi_pth_root)(xs, ps) + + def _quantized_matrix_inverse_pth_root_vmap(qxs, qds, qbs, ps): + def _quantized_to_float(qx, qd, qb): + qv = QuantizedValue(qx, qd, qb, qx.dtype, True, list(qx.shape)) + return qv.to_float() + + def matrix_inverse_pth_root_wrapper(qx, qd, qb, p): + v = _quantized_to_float(qx, qd, qb) + preconditioner, error = matrix_inverse_pth_root( + v, p, ridge_epsilon=matrix_epsilon, precision=precision + ) + qp = QuantizedValue.from_float_value(preconditioner, qx.dtype, True) + return qp.quantized, qp.diagonal, qp.bucket_size, error + + return jax.vmap(matrix_inverse_pth_root_wrapper)(qxs, qds, qbs, ps) + + def _matrix_inverse_pth_root_pjit(xs, ps, statistics_partition_spec=None): + # Partition the concatenated statistics matrix across all cores. 
+ pspec_for_partition = preconditioner_partition_spec + partitioned_xs = pjit.with_sharding_constraint(xs, pspec_for_partition) + partitioned_ps = pjit.with_sharding_constraint( + ps, pjit.PartitionSpec(preconditioner_partition_spec[0]) + ) + # Run matrix inverse pth root on each shard. + partitioned_preconditioners, partitioned_errors = _matrix_inverse_pth_root_vmap( + partitioned_xs, partitioned_ps + ) + # Reshard output to have the same PSpec as input. This is required to avoid + # vmap seeing the full set of statistics. + partitioned_preconditioners = pjit.with_sharding_constraint( + partitioned_preconditioners, pspec_for_partition + ) + # Recombine the outputs at each core. + preconditioners = pjit.with_sharding_constraint( + partitioned_preconditioners, statistics_partition_spec + ) + errors = pjit.with_sharding_constraint(partitioned_errors, pjit.PartitionSpec()) + return preconditioners, errors + + def _pmap_compute_preconditioners( + states, + step, + statistics, + num_statistics_per_state, + original_shapes, + exponents, + max_size, + prev_preconditioners, + ): + """Computes preconditioners for given statistics in states in PMAP mode. + + Args: + states: A list of optimizer states. + step: Current step number + statistics: A list of statistics for all variables (for every dim) + num_statistics_per_state: Number of statistis per state to reconstruct + output states. + original_shapes: A list of shapes of the statistics. + exponents: Exponent power to use for inverse-pth roots. + max_size: Maximum dim of the statistics to pad. + prev_preconditioners: Previously available preconditioner. + + Returns: + New optimizer states after computing the preconditioner. + """ + num_devices = lax.psum(1, batch_axis_name) + num_statistics = len(statistics) + # Pad statistics and exponents to next multiple of num_devices. 
+ packed_statistics = [pad_square_matrix(stat, max_size) for stat in statistics] + to_pad = -num_statistics % num_devices + packed_statistics.extend( + [jnp.eye(max_size, dtype=packed_statistics[0].dtype) for _ in range(to_pad)] + ) + exponents.extend([1 for _ in range(to_pad)]) + + if not packed_statistics: + return states + + all_statistics = batch(packed_statistics, num_devices) + all_exponents = batch(exponents, num_devices) + + def _internal_inverse_pth_root_all(): + current_replica = lax.axis_index(batch_axis_name) + preconditioners, errors = _matrix_inverse_pth_root_vmap( + all_statistics[current_replica], all_exponents[current_replica] + ) + preconditioners = jax.lax.all_gather(preconditioners, batch_axis_name) + errors = jax.lax.all_gather(errors, batch_axis_name) + preconditioners_flat = unbatch(preconditioners) + errors_flat = unbatch(errors) + return preconditioners_flat, errors_flat + + if preconditioning_compute_steps == 1: + preconditioners_flat, errors_flat = _internal_inverse_pth_root_all() + else: + # Passing statistics instead of preconditioners as they are similarly + # shaped tensors. Note statistics will be ignored as we are passing in + # a large init value for error. 
+ preconditioners_init = packed_statistics + errors_init = [inverse_failure_threshold] * len(packed_statistics) + init_state = [preconditioners_init, errors_init] + perform_step = step % preconditioning_compute_steps == 0 + preconditioners_flat, errors_flat = efficient_cond( + perform_step, _internal_inverse_pth_root_all, init_state + ) + + def _skip(error): + condition = jnp.logical_or( + jnp.isnan(error), error >= inverse_failure_threshold + ) + return condition.astype(error.dtype) + + def _select_preconditioner(error, new_p, old_p): + return lax.cond( + _skip(error), lambda _: old_p, lambda _: new_p, operand=None + ) + + new_preconditioners_flat = [] + new_errors_flat = [] + for p, shape, prev_p, error in zip( + preconditioners_flat, original_shapes, prev_preconditioners, errors_flat + ): + new_preconditioners_flat.append( + _select_preconditioner(error, p[: shape[0], : shape[1]], prev_p) + ) + new_errors_flat.append(error) + + assert len(states) == len(num_statistics_per_state) + assert len(new_preconditioners_flat) == num_statistics + assert len(new_errors_flat) == num_statistics + + # Add back empty preconditioners so we that we can set the optimizer state. 
+ preconditioners_for_states = [] + idx = 0 + errors_for_states = [] + for num_statistics, state in zip(num_statistics_per_state, states): + if num_statistics == 0: + preconditioners_for_states.append([]) + errors_for_states.append([]) + else: + preconditioners_for_state = new_preconditioners_flat[ + idx : idx + num_statistics + ] + assert len(state.statistics) == len(preconditioners_for_state) + preconditioners_for_states.append(preconditioners_for_state) + + errors_for_state = jnp.stack( + new_errors_flat[idx : idx + num_statistics] + ) + assert len(state.statistics) == len(errors_for_state) + errors_for_states.append(errors_for_state) + + idx += num_statistics + new_states = [] + for state, new_preconditioners, new_errors in zip( + states, preconditioners_for_states, errors_for_states + ): + if state.statistics: + new_errors = jnp.where( + jnp.logical_and( + new_errors > 0.0, new_errors != inverse_failure_threshold + ), + new_errors, + state.training_metrics.inverse_pth_root_errors, + ) + new_training_metrics = TrainingMetrics(new_errors) + new_states.append( + ParameterStats( + state.diagonal_statistics, + state.statistics, + new_preconditioners, + state.diagonal_momentum, + state.momentum, + new_training_metrics, + ) + ) + + return new_states + + def _pmap_quantized_compute_preconditioners( + states, + step, + statistics, + num_statistics_per_state, + original_shapes, + exponents, + max_size, + prev_preconditioners, + ): + """Computes preconditioners for given statistics in states in PMAP mode. + + For quantization, each statistic is represented by three values: + quantized matrix, diagonal, and bucket sizes, we run inverse pth-roots + without ever recreating the original matrix in f32. + + Args: + states: A list of optimizer states. + step: Current step number + statistics: A list of statistics for all variables (for every dim) + num_statistics_per_state: Number of statistis per state to reconstruct + output states. 
+ original_shapes: A list of shapes of the statistics. + exponents: Exponent power to use for inverse-pth roots. + max_size: Maximum dim of the statistics to pad. + prev_preconditioners: Previously available preconditioner. + + Returns: + New optimizer states after computing the preconditioner. + """ + num_devices = lax.psum(1, batch_axis_name) + num_statistics = len(statistics) + quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers() + # Complexity here is around: shapes needing be statically shaped, + # our custom quantization type requires a different type of packing. + + # Parallel tensors: + # quantized [dxd] + # diagonals [d] f32 + # bucket_sizes [d] f32 + packed_quantized_statistics = [ + pad_square_matrix(stat.quantized, max_size) for stat in statistics + ] + packed_quantized_diagonals = [ + pad_vector(stat.diagonal, max_size) for stat in statistics + ] + packed_quantized_bucket_sizes = [ + pad_vector(stat.bucket_size, max_size) for stat in statistics + ] + + to_pad = -num_statistics % num_devices + padded_eye = jnp.eye(max_size, dtype=jnp.float32) + quantized_eye = QuantizedValue.from_float_value( + padded_eye, quantized_dtype, True + ) + packed_quantized_statistics.extend( + [quantized_eye.quantized for _ in range(to_pad)] + ) + packed_quantized_diagonals.extend( + [quantized_eye.diagonal for _ in range(to_pad)] + ) + packed_quantized_bucket_sizes.extend( + [quantized_eye.bucket_size for _ in range(to_pad)] + ) + exponents.extend([1 for _ in range(to_pad)]) + + if not packed_quantized_statistics: + return states + + all_quantized_statistics = batch(packed_quantized_statistics, num_devices) + all_quantized_diagonals = batch(packed_quantized_diagonals, num_devices) + all_quantized_bucket_sizes = batch(packed_quantized_bucket_sizes, num_devices) + all_exponents = batch(exponents, num_devices) + + def _internal_inverse_pth_root_all(): + current_replica = lax.axis_index(batch_axis_name) + ( + quantized_preconditioners, + quantized_diagonals, 
+ quantized_bucket_sizes, + errors, + ) = _quantized_matrix_inverse_pth_root_vmap( + all_quantized_statistics[current_replica], + all_quantized_diagonals[current_replica], + all_quantized_bucket_sizes[current_replica], + all_exponents[current_replica], + ) + quantized_preconditioners = jax.lax.all_gather( + quantized_preconditioners, batch_axis_name + ) + quantized_diagonals = jax.lax.all_gather( + quantized_diagonals, batch_axis_name + ) + quantized_bucket_sizes = jax.lax.all_gather( + quantized_bucket_sizes, batch_axis_name + ) + errors = jax.lax.all_gather(errors, batch_axis_name) + quantized_preconditioners_flat = unbatch(quantized_preconditioners) + quantized_diagonals_flat = unbatch(quantized_diagonals) + quantized_bucket_sizes_flat = unbatch(quantized_bucket_sizes) + errors_flat = unbatch(errors) + return ( + quantized_preconditioners_flat, + quantized_diagonals_flat, + quantized_bucket_sizes_flat, + errors_flat, + ) + + if preconditioning_compute_steps == 1: + ( + quantized_preconditioners_flat, + quantized_diagonals_flat, + quantized_bucket_sizes_flat, + errors_flat, + ) = _internal_inverse_pth_root_all() + else: + # Passing statistics instead of preconditioners as they are similarly + # shaped tensors. Note statistics will be ignored as we are passing in + # a large init value for error. 
+ quantized_preconditioners_init = packed_quantized_statistics + quantized_diagonals_init = packed_quantized_diagonals + quantized_bucket_sizes_init = packed_quantized_bucket_sizes + errors_init = [inverse_failure_threshold] * len( + quantized_preconditioners_init + ) + init_state = [ + quantized_preconditioners_init, + quantized_diagonals_init, + quantized_bucket_sizes_init, + errors_init, + ] + perform_step = step % preconditioning_compute_steps == 0 + ( + quantized_preconditioners_flat, + quantized_diagonals_flat, + quantized_bucket_sizes_flat, + errors_flat, + ) = efficient_cond(perform_step, _internal_inverse_pth_root_all, init_state) + + def _skip(error): + condition = jnp.logical_or( + jnp.isnan(error), error >= inverse_failure_threshold + ) + return condition.astype(error.dtype) + + def _select_preconditioner(error, new_p, old_p): + return lax.cond( + _skip(error), lambda _: old_p, lambda _: new_p, operand=None + ) + + new_quantized_preconditioners_flat = [] + new_quantized_diagonals_flat = [] + new_quantized_bucket_sizes_flat = [] + new_errors_flat = [] + for p, d, b, shape, prev_p, error in zip( + quantized_preconditioners_flat, + quantized_diagonals_flat, + quantized_bucket_sizes_flat, + original_shapes, + prev_preconditioners, + errors_flat, + ): + new_quantized_preconditioners_flat.append( + _select_preconditioner( + error, p[: shape[0], : shape[1]], prev_p.quantized + ) + ) + new_quantized_diagonals_flat.append( + _select_preconditioner(error, d[: shape[0]], prev_p.diagonal) + ) + new_quantized_bucket_sizes_flat.append( + _select_preconditioner(error, b[: shape[0]], prev_p.bucket_size) + ) + new_errors_flat.append(error) + + assert len(states) == len(num_statistics_per_state) + assert len(new_quantized_preconditioners_flat) == num_statistics + assert len(new_quantized_diagonals_flat) == num_statistics + assert len(new_quantized_bucket_sizes_flat) == num_statistics + + # Add back empty preconditioners so we that we can set the optimizer state. 
+ preconditioners_for_states = [] + errors_for_states = [] + idx = 0 + for num_statistics, state in zip(num_statistics_per_state, states): + if num_statistics == 0: + preconditioners_for_states.append([]) + errors_for_states.append([]) + else: + quantized_preconditioners_for_state = ( + new_quantized_preconditioners_flat[idx : idx + num_statistics] + ) + quantized_diagonals_for_state = new_quantized_diagonals_flat[ + idx : idx + num_statistics + ] + quantized_bucket_sizes_for_state = new_quantized_bucket_sizes_flat[ + idx : idx + num_statistics + ] + errors_for_state = jnp.stack( + new_errors_flat[idx : idx + num_statistics] + ) + + assert len(state.statistics) == len(quantized_preconditioners_for_state) + assert len(state.statistics) == len(quantized_diagonals_for_state) + assert len(state.statistics) == len(quantized_bucket_sizes_for_state) + assert len(state.statistics) == len(errors_for_state) + + quantized_preconditioners = [] + for qv, qd, qb in zip( + quantized_preconditioners_for_state, + quantized_diagonals_for_state, + quantized_bucket_sizes_for_state, + ): + quantized_preconditioners.append( + QuantizedValue(qv, qd, qb, qv.dtype, True, list(qv.shape)) + ) + preconditioners_for_states.append(quantized_preconditioners) + errors_for_states.append(errors_for_state) + idx += num_statistics + new_states = [] + for state, new_preconditioners, new_errors in zip( + states, preconditioners_for_states, errors_for_states + ): + if state.statistics: + new_errors = jnp.where( + jnp.logical_and( + new_errors > 0.0, new_errors != inverse_failure_threshold + ), + new_errors, + state.training_metrics.inverse_pth_root_errors, + ) + new_training_metrics = TrainingMetrics(new_errors) + new_states.append( + ParameterStats( + state.diagonal_statistics, + state.statistics, + new_preconditioners, + state.diagonal_momentum, + state.momentum, + new_training_metrics, + ) + ) + + return new_states + + def _pjit_compute_preconditioners( + states, + step, + statistics, + 
num_statistics_per_state, + original_shapes, + exponents, + max_size, + prev_preconditioners, + ): + """Computes preconditioners for given statistics in states in PJIT mode. + + Args: + states: A list of optimizer states. + step: Current step number + statistics: A list of statistics for all variables (for every dim) + num_statistics_per_state: Number of statistis per state to reconstruct + output states. + original_shapes: A list of shapes of the statistics. + exponents: Exponent power to use for inverse-pth roots. + max_size: Maximum dim of the statistics to pad. + prev_preconditioners: Previously available preconditioner. + + Returns: + New optimizer states after computing the preconditioner. + """ + num_statistics = len(statistics) + to_pad = -num_statistics % num_devices_for_pjit + padded_statistics = [pad_square_matrix(stat, max_size) for stat in statistics] + padded_statistics.extend( + [jnp.eye(max_size, dtype=padded_statistics[0].dtype) for _ in range(to_pad)] + ) + exponents.extend([1 for _ in range(to_pad)]) + all_statistics = jnp.stack(padded_statistics) + all_exponents = jnp.stack(exponents) + + def _internal_inverse_pth_root_all(): + preconditioners, errors = _matrix_inverse_pth_root_pjit( + all_statistics, all_exponents + ) + b1 = preconditioners.shape[0] + + def split(batched_values): + return [ + jnp.squeeze(v) + for v in jnp.split(batched_values, indices_or_sections=b1, axis=0) + ] + + return split(preconditioners), split(errors) + + if preconditioning_compute_steps == 1: + preconditioners_flat, errors_flat = _internal_inverse_pth_root_all() + else: + # Passing statistics instead of preconditioners as they are similarly + # shaped tensors. Note statistics will be ignored as we are passing in + # a large init value for error. 
+ preconditioners_init = padded_statistics + errors_init = [inverse_failure_threshold] * len(padded_statistics) + init_state = [preconditioners_init, errors_init] + perform_step = step % preconditioning_compute_steps == 0 + preconditioners_flat, errors_flat = efficient_cond( + perform_step, _internal_inverse_pth_root_all, init_state + ) + + def _skip(error): + condition = jnp.logical_or( + jnp.isnan(error), error >= inverse_failure_threshold + ) + return condition.astype(error.dtype) + + def _select_preconditioner(error, new_p, old_p): + return lax.cond( + _skip(error), lambda _: old_p, lambda _: new_p, operand=None + ) + + new_preconditioners_flat = [] + new_errors_flat = [] + for p, shape, prev_p, error in zip( + preconditioners_flat, original_shapes, prev_preconditioners, errors_flat + ): + new_preconditioners_flat.append( + _select_preconditioner(error, p[: shape[0], : shape[1]], prev_p) + ) + new_errors_flat.append(error) + + assert len(states) == len(num_statistics_per_state) + assert len(new_preconditioners_flat) == num_statistics + + # Add back empty preconditioners so we that we can set the optimizer state. 
+ preconditioners_for_states = [] + errors_for_states = [] + idx = 0 + for num_statistics, state in zip(num_statistics_per_state, states): + if num_statistics == 0: + preconditioners_for_states.append([]) + errors_for_states.append([]) + else: + preconditioners_for_state = new_preconditioners_flat[ + idx : idx + num_statistics + ] + assert len(state.statistics) == len(preconditioners_for_state) + preconditioners_for_states.append(preconditioners_for_state) + + errors_for_state = jnp.stack( + new_errors_flat[idx : idx + num_statistics] + ) + assert len(state.statistics) == len(errors_for_state) + errors_for_states.append(errors_for_state) + idx += num_statistics + + new_states = [] + for state, new_preconditioners, new_errors in zip( + states, preconditioners_for_states, errors_for_states + ): + if state.statistics: + new_errors = jnp.where( + jnp.logical_and( + new_errors > 0.0, new_errors != inverse_failure_threshold + ), + new_errors, + state.training_metrics.inverse_pth_root_errors, + ) + new_training_metrics = TrainingMetrics(new_errors) + new_states.append( + ParameterStats( + state.diagonal_statistics, + state.statistics, + new_preconditioners, + state.diagonal_momentum, + state.momentum, + new_training_metrics, + ) + ) + + return new_states + + def _compute_preconditioners(states, params, step): + """Computes preconditioners for given statistics in states. + + Args: + states: A list of optimizer states. + params: A list of params. + step: Current step number + + Returns: + New optimizer states after computing the preconditioner. 
+ """ + statistics = [] + num_statistics_per_state = [] + original_shapes = [] + exponents = [] + max_size = 0 + prev_preconditioners = [] + + for state, param in zip(states, params): + num_statistics = len(state.statistics) + num_statistics_per_state.append(num_statistics) + original_shapes_for_state = [] + if num_statistics > 0: + preconditioner = Preconditioner( + param, block_size, best_effort_shape_interpretation + ) + for statistic in state.statistics: + exponents.append( + preconditioner.exponent_for_preconditioner() + if exponent_override == 0 + else exponent_override + ) + original_shapes_for_state.append(statistic.shape) + max_size = max(max_size, statistic.shape[0]) + + statistics.extend(state.statistics) + prev_preconditioners.extend(state.preconditioners) + original_shapes.extend(original_shapes_for_state) + + if batch_axis_name: + # Quantization is only enabled if batch_axis_name is not set. + quantized_dtype = quantized_dtype_for_second_moment_statistics_buffers() + + if quantized_dtype == jnp.float32: + return _pmap_compute_preconditioners( + states, + step, + statistics, + num_statistics_per_state, + original_shapes, + exponents, + max_size, + prev_preconditioners, + ) + else: + return _pmap_quantized_compute_preconditioners( + states, + step, + statistics, + num_statistics_per_state, + original_shapes, + exponents, + max_size, + prev_preconditioners, + ) + + else: + return _pjit_compute_preconditioners( + states, + step, + statistics, + num_statistics_per_state, + original_shapes, + exponents, + max_size, + prev_preconditioners, + ) + + def _transform_grad(grad, state, param, step): + """Transform per-parameter gradients.""" + preconditioner = Preconditioner( + param, block_size, best_effort_shape_interpretation + ) + sgd_update = grad + new_diagonal_statistics = state.diagonal_statistics.to_float() + if ( + graft_type == GraftingType.ADAGRAD + or graft_type == GraftingType.ADAGRAD_NORMALIZED + ): + + scaled_grad = grad + if graft_type == 
GraftingType.ADAGRAD_NORMALIZED: + scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16) + + new_diagonal_statistics = state.diagonal_statistics.to_float() + jnp.square( + scaled_grad + ) + adagrad_update = scaled_grad / ( + jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon + ) + grafting_update = adagrad_update + elif ( + graft_type == GraftingType.RMSPROP + or graft_type == GraftingType.RMSPROP_NORMALIZED + ): + + scaled_grad = grad + if graft_type == GraftingType.RMSPROP_NORMALIZED: + scaled_grad = grad / (jnp.linalg.norm(grad) + 1e-16) + + w1 = beta2 + w2 = beta2 if beta2 == 1.0 else (1.0 - beta2) + + new_diagonal_statistics = ( + w1 * state.diagonal_statistics.to_float() + w2 * jnp.square(scaled_grad) + ) + rmsprop_update = scaled_grad / ( + jnp.sqrt(new_diagonal_statistics) + diagonal_epsilon + ) + + if clip_by_scaled_gradient_norm: + scaled_grad_norm = jnp.linalg.norm(rmsprop_update) / ( + jnp.sqrt(float(rmsprop_update.size)) + ) + clipping_denom = jnp.maximum( + 1.0, scaled_grad_norm / clip_by_scaled_gradient_norm + ) + rmsprop_update /= clipping_denom + + grafting_update = rmsprop_update + elif graft_type == GraftingType.SGD: + grafting_update = sgd_update + else: + grafting_update = jnp.ones_like(sgd_update) * jnp.sign(sgd_update) + + precond_grad = grad + if not _skip_preconditioning(param): + precond_grad = preconditioner.preconditioned_grad( + precond_grad, _maybe_dequantize_preconditioners(state.preconditioners) + ) + else: + precond_grad = grafting_update + + grafting_update_norm = jnp.linalg.norm(grafting_update) + precond_grad_norm = jnp.linalg.norm(precond_grad) + + multiplier = grafting_update_norm / (precond_grad_norm + 1e-16) + shampoo_update = precond_grad * multiplier + + shampoo_update_with_wd = shampoo_update + grafting_update_with_wd = grafting_update + if weight_decay != 0: + shampoo_update_with_wd = shampoo_update + weight_decay * param + grafting_update_with_wd = grafting_update + weight_decay * param + + w = (1.0 - beta1) if 
moving_average_for_momentum else 1.0 + + shampoo_update_with_wd_momentum = ( + state.momentum.to_float() * beta1 + w * shampoo_update_with_wd + ) + + if _graft_type_has_diagonal_momentum_states(): + grafting_update_with_wd_momentum = ( + state.diagonal_momentum.to_float() * beta1 + w * grafting_update_with_wd + ) + else: + # Share the momentum buffer + grafting_update_with_wd_momentum = ( + state.momentum.to_float() * beta1 + w * grafting_update_with_wd + ) + + run_shampoo = (step >= start_preconditioning_step).astype( + grafting_update_with_wd_momentum.dtype + ) + + momentum_update = ( + run_shampoo * shampoo_update_with_wd_momentum + + (1.0 - run_shampoo) * grafting_update_with_wd_momentum + ) + + wd_update = ( + run_shampoo * shampoo_update_with_wd + + (1.0 - run_shampoo) * grafting_update_with_wd + ) + + nesterov_momentum_update = momentum_update + if nesterov: + nesterov_momentum_update = w * wd_update + beta1 * momentum_update + + lr = learning_rate + if callable(learning_rate): + lr = learning_rate(step) + transformed_update = -1.0 * lr * nesterov_momentum_update + + new_diagonal_momentum = grafting_update_with_wd_momentum + new_momentum = shampoo_update_with_wd_momentum + if not _graft_type_has_diagonal_momentum_states(): + new_diagonal_momentum = [] + new_momentum = momentum_update + + param_stats = ParameterStats( + _quantize_diagonal_statistics(new_diagonal_statistics), + state.statistics, + state.preconditioners, + _quantize_momentum(new_diagonal_momentum), + _quantize_momentum(new_momentum), + state.training_metrics, + ) + + return transformed_update, param_stats + + def update_fn(grads, state, params): + """Transform the input gradient and update all statistics. + + Args: + grads: the gradient tensors for the parameters. + state: a named tuple containing the state of the optimizer + params: the parameters that should be updated. + + Returns: + A tuple containing the new parameters and the new optimizer state. 
+ """ + params_flat, treedef = jax.tree_flatten(params) + stats_flat = treedef.flatten_up_to(state.stats) + grads_flat = treedef.flatten_up_to(grads) + + new_stats_flat = jax.tree_multimap( + lambda g, s, p: _compute_stats(g, s, p, state.count), + grads_flat, + stats_flat, + params_flat, + ) + new_stats_flat = _compute_preconditioners( + new_stats_flat, params_flat, state.count + ) + outputs = jax.tree_multimap( + lambda g, s, p: _transform_grad(g, s, p, state.count), + grads_flat, + new_stats_flat, + params_flat, + ) + updates_flat, new_stats_flat = list(zip(*outputs)) if outputs else ((), ()) + + updates = jax.tree_unflatten(treedef, updates_flat) + new_stats = jax.tree_unflatten(treedef, new_stats_flat) + + new_state = ShampooState(count=state.count + 1, stats=new_stats) + return updates, new_state + + if shard_optimizer_states: + # Hijacks the init_fn signature so we can return an OptState with + # appropriate init_fns. + def _init_fns(unused_params): + return InitFnState( + init_fn=sharded_init_fn, + pspec_fn=sharded_init_partition_spec_fn, + shape_and_dtype_fn=sharded_init_shape_and_dtype_fn, + ) + + return optax.GradientTransformation(_init_fns, sharded_update_fn) + else: + return optax.GradientTransformation(init_fn, update_fn) diff --git a/tools/train/scalable_shampoo/quantization_utils.py b/tools/train/scalable_shampoo/quantization_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b413d6eafd26d1bd7c3082db05917a7da0d5672b --- /dev/null +++ b/tools/train/scalable_shampoo/quantization_utils.py @@ -0,0 +1,124 @@ +# coding=utf-8 +# Copyright 2022 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Helper routines for quantization."""

from typing import Any

import chex
import jax.numpy as jnp
from flax import struct


# pylint:disable=no-value-for-parameter
@struct.dataclass
class QuantizedValue:
    """State associated with quantized value.

    Stores a tensor in a low-precision integer dtype together with the
    per-column `bucket_size` scale needed to dequantize it, plus (optionally)
    the full-precision diagonal for 2D matrices.
    """

    quantized: chex.Array
    diagonal: chex.Array  # Diagonal (if extract_diagonal is set)
    bucket_size: chex.Array
    # Dtype for the quantized value (static — not part of the pytree).
    quantized_dtype: jnp.dtype = struct.field(pytree_node=False)
    # Whether the diagonal is stored separately in full precision.
    extract_diagonal: bool = struct.field(pytree_node=False)
    shape: Any = struct.field(pytree_node=False)  # Shape of the tensor.

    @classmethod
    def from_float_value(cls, fvalue, quantized_dtype, extract_diagonal=False):
        """Builds a QuantizedValue from a float tensor (or empty list)."""
        if isinstance(fvalue, list) and not fvalue:
            # Empty placeholder (e.g. unused momentum slot).
            return QuantizedValue([], [], [], quantized_dtype, extract_diagonal, [])
        quantized, diagonal_fvalue, bucket_size = QuantizedValue.quantize(
            fvalue, quantized_dtype, extract_diagonal
        )
        return QuantizedValue(
            quantized,
            diagonal_fvalue,
            bucket_size,
            quantized_dtype,
            extract_diagonal,
            list(quantized.shape),
        )

    # Quantization is from Lingvo JAX optimizers.
    # We extend it for int16 quantization of PSD matrices.
    @classmethod
    def quantize(cls, fvalue, quantized_dtype, extract_diagonal=False):
        """Returns (quantized value, diagonal, per-column bucket sizes).

        float32/bfloat16 targets are a plain cast (no buckets); int8/int16
        map each column's max-abs value onto the top bucket.
        """
        if quantized_dtype == jnp.float32:
            return fvalue, [], []
        elif quantized_dtype == jnp.bfloat16:
            return fvalue.astype(jnp.bfloat16), [], []

        float_dtype = fvalue.dtype
        if quantized_dtype == jnp.int8:
            # value -128 is not used.
            num_buckets = jnp.array(127.0, dtype=float_dtype)
        elif quantized_dtype == jnp.int16:
            # value -32768 is not used.
            num_buckets = jnp.array(32767.0, dtype=float_dtype)
        else:
            raise ValueError(f"Quantized dtype {quantized_dtype} not supported.")
        # max value is mapped to num_buckets

        if extract_diagonal and fvalue.ndim != 2:
            raise ValueError(
                f"Input array {fvalue} must be 2D to work with extract_diagonal."
            )

        diagonal_fvalue = []
        if extract_diagonal:
            # Keep the diagonal in full precision and quantize only the rest.
            diagonal_fvalue = jnp.diag(fvalue)
            # Remove the diagonal entries.
            fvalue = fvalue - jnp.diag(diagonal_fvalue)

        # TODO(rohananil): Extend this by making use of information about the blocks
        # SM3 style which will be useful for diagonal statistics
        # We first decide the scale.
        if fvalue.ndim < 1:
            raise ValueError(
                f"Input array {fvalue} must have a strictly positive number of "
                "dimensions."
            )

        # Scale is chosen per column (reduction over axis 0).
        max_abs = jnp.max(jnp.abs(fvalue), axis=0)
        bucket_size = max_abs / num_buckets
        bs_expanded = bucket_size[jnp.newaxis, Ellipsis]
        # To avoid divide by 0.0
        bs_nonzero = jnp.where(
            bs_expanded > 0.0, bs_expanded, jnp.ones_like(bs_expanded)
        )
        ratio = fvalue / bs_nonzero
        # We use rounding to remove bias.
        quantized = jnp.round(ratio)
        return quantized.astype(quantized_dtype), diagonal_fvalue, bucket_size

    def to_float(self):
        """Returns the dequantized float value (inverse of `quantize`)."""
        if isinstance(self.quantized, list) and not self.quantized:
            return self.quantized

        if self.quantized_dtype == jnp.float32:
            return self.quantized

        if self.quantized_dtype == jnp.bfloat16:
            return self.quantized.astype(jnp.float32)

        float_dtype = self.bucket_size.dtype
        bucket_size = self.bucket_size[jnp.newaxis, Ellipsis]
        val = self.quantized.astype(float_dtype) * bucket_size
        if self.extract_diagonal:
            # Restore the full-precision diagonal removed during quantization.
            val += jnp.diag(self.diagonal)
        return val


# --- file: tools/train/scalable_shampoo/sm3.py ---
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
+ +# An implementation of SM3 from: +# +# Memory-Efficient Adaptive Optimization, https://arxiv.org/pdf/1901.11150.pdf +# Rohan Anil, Vineet Gupta, Tomer Koren, Yoram Singer +# +# Author: Rohan Anil (rohananil at google dot com) +# + +"""SM3 Implementation.""" + +import functools +from typing import Any, NamedTuple + +import chex +import jax +import jax.numpy as jnp +import optax + +from .quantization_utils import QuantizedValue + + +class SM3State(NamedTuple): + count: chex.Array + stats: Any + + +# Per parameter optimizer state used in data-parallel training. +class ParameterStats(NamedTuple): + """State associated to each parameter of the model being trained.""" + + diagonal_statistics: chex.Array # Accumulator for diagonal preconditioner + diagonal_momentum: QuantizedValue # Momentum for the diagonal preconditioner + + +def sm3( + learning_rate, beta1=0.9, beta2=0.999, diagonal_epsilon=1e-10, normalize_grads=False +): + """SM3 optimizer. + + Memory-Efficient Adaptive Optimization, Rohan Anil, Vineet Gupta, Tomer Koren, + Yoram Singer + + https://arxiv.org/abs/1901.11150 + + Args: + learning_rate: the step size used to update the parameters. + beta1: momentum parameter. + beta2: second moment averaging parameter. + diagonal_epsilon: epsilon for sm3 + normalize_grads: Whether to normalize grads. Author finds it useful when + grads are high variance. + + Returns: + a GradientTransformation. 
+ """ + + def _quantize_momentum(momentum_statistics): + return QuantizedValue.from_float_value(momentum_statistics, jnp.int8) + + def init_fn(params): + """Initialise the optimiser's state.""" + + def _init(param): + accumulators = [jnp.zeros([s]) for s in param.shape] + momentum = _quantize_momentum(jnp.zeros_like(param)) + return ParameterStats(accumulators, momentum) + + return SM3State( + count=jnp.zeros([], jnp.int32), stats=jax.tree_map(_init, params) + ) + + def _get_expanded_shape(shape, i): + rank = len(shape) + # Replaces a `shape` of [M, N, K] with 1 in all dimensions except for i. + # For eg: i = 1 returns [1, N, 1]. + return [1] * i + [shape[i]] + [1] * (rank - i - 1) + + def _moving_averages(grad, accumulators): + w = (1.0 - beta2) if beta2 != 1.0 else 1.0 + if grad.ndim < 2: + return beta2 * accumulators[0] + w * grad**2 + else: + min_accumulator = functools.reduce(jnp.minimum, accumulators) + return beta2 * min_accumulator + w * grad**2 + + def _moving_averages_momentum(grad, momentum): + w = (1.0 - beta1) if beta1 != 1.0 else 1.0 + return beta1 * momentum.to_float() + w * grad + + def _sketch_diagonal_statistics(grad, updated_diagonal_statistics): + all_diagonal_statistics = [] + for i in range(grad.ndim): + axes = list(range(i)) + list(range(i + 1, grad.ndim)) + dim_diagonal_statistics = jnp.max(updated_diagonal_statistics, axis=axes) + all_diagonal_statistics.append(dim_diagonal_statistics) + if grad.ndim == 1: + all_diagonal_statistics[0] = updated_diagonal_statistics + return all_diagonal_statistics + + def update_fn(updates, state, params=None): + del params + stats = state.stats + if normalize_grads: + updates = jax.tree_map(lambda g: g / (jnp.linalg.norm(g) + 1e-16), updates) + # Reshape all vectors into N-d tensors to compute min over them. 
+ # [n], [m] -> [n, 1], [1, m] + expanded_diagonal_statistics = jax.tree_multimap( + lambda grad, state: [ # pylint:disable=g-long-lambda + jnp.reshape( + state.diagonal_statistics[i], _get_expanded_shape(grad.shape, i) + ) + for i in range(grad.ndim) + ], + updates, + stats, + ) + + # Compute new diagonal statistics + new_diagonal_statistics = jax.tree_multimap( + _moving_averages, updates, expanded_diagonal_statistics + ) + + # Compute preconditioners (1/sqrt(s)) where s is the statistics. + new_preconditioners = jax.tree_map( + lambda t: 1.0 / jnp.sqrt(t + diagonal_epsilon), new_diagonal_statistics + ) + preconditioned_grads = jax.tree_multimap( + lambda g, p: g * p, updates, new_preconditioners + ) + + # Compute updated momentum (also handle quantization) + updated_momentum = jax.tree_multimap( + lambda preconditioned_grad, state: _moving_averages_momentum( # pylint:disable=g-long-lambda + preconditioned_grad, state.diagonal_momentum + ), + preconditioned_grads, + stats, + ) + + # Update diagonal statistics. + updated_diagonal_statistics = jax.tree_multimap( + _sketch_diagonal_statistics, updates, new_diagonal_statistics + ) + + # Update momentum. 
+ new_sm3_stats = jax.tree_multimap( + lambda momentum, diagonal_stats: ParameterStats( # pylint:disable=g-long-lambda + diagonal_stats, _quantize_momentum(momentum) + ), + updated_momentum, + updated_diagonal_statistics, + ) + + lr = learning_rate + if callable(learning_rate): + lr = learning_rate(state.count) + + new_updates = jax.tree_map(lambda pg: -lr * pg, updated_momentum) + return new_updates, SM3State(count=state.count + 1, stats=new_sm3_stats) + + return optax.GradientTransformation(init_fn, update_fn) diff --git a/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py b/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..f6103a890bbefcc6a01ef875463d2a24c6b20de7 --- /dev/null +++ b/tools/train/scalable_shampoo/symmetric_matrices/symmetric_matrices.py @@ -0,0 +1,442 @@ +# coding=utf-8 +# Copyright 2022 The Google Research Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""JAX Ops for symmetric matrices used by the Shampoo optimizer.""" + +import functools +from typing import Any, List, Optional, Sequence, Union + +import jax +import jax.numpy as jnp +import numpy as np +from flax import struct +from jax import lax + + +@struct.dataclass +class SlicedSymmetricMatrix: + """A symmetric matrix represented by lower-triangular block row slices. 

    For example, the symmetric matrix M = [[a, b^T], [b, c]] would be represented
    by the block rows a and [b, c].

    The matrix may be batched, in which case each entry of block_rows may have
    dimension greater than 2. The last two dimensions represent the rows and cols.
    """

    block_rows: List[jnp.ndarray]


def product_with_transpose(
    mat1,
    mat2,
    axes,
    precision=lax.Precision.DEFAULT,
):
    """Returns mat1 * mat2^T for two matrices (possibly batched).

    The rows and columns are the last two dimensions for each matrix.

    Args:
      mat1: First matrix.
      mat2: Second matrix.
      axes: The axes over which to apply the product.
      precision: JAX precision to use for the multiplication.
    """
    return jnp.tensordot(a=mat1, b=mat2, axes=axes, precision=precision)


@functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
def sliced_transposed_product(
    mat,
    block_size,
    axes=(-1,),
    precision=lax.Precision.DEFAULT,
):
    """Returns the blocked slices representing a symmetric contraction.

    Specifically, the output is a contraction of the input mat with itself, in the
    specified axes.

    Args:
      mat: The matrix for which we will compute a contraction with itself.
      block_size: The size of row blocks to compute.
      axes: Axes to use for the contraction.
      precision: The precision to use in each computation.

    Raises:
      ValueError: Raised when the specified block size does not evenly divide
        the number of rows of the input mat.
    """
    rank = len(mat.shape)

    def _make_axis_positive(ax):
        assert -rank <= ax < rank
        return ax + rank if ax < 0 else ax

    # The single axis NOT contracted over is the "row" axis of the result.
    positive_axes = [_make_axis_positive(ax) for ax in axes]
    assert len(positive_axes) == len(axes)
    remaining_axes = set(range(rank)) - set(positive_axes)
    assert len(remaining_axes) == 1
    remaining_ax = remaining_axes.pop()

    num_rows = mat.shape[remaining_ax]
    if num_rows % block_size != 0:
        raise ValueError(
            "The row dimension must be divisible by block_size. "
            f"Instead got row dimension={num_rows} and block_size={block_size}."
        )

    block_rows = []
    for i in range(num_rows // block_size):
        start_indices = [0] * rank
        start_indices[remaining_ax] = i * block_size

        # Row block i (block_size rows) ...
        slice_sizes = list(mat.shape)
        slice_sizes[remaining_ax] = block_size

        # ... contracted against all rows up to and including block i, giving
        # the lower-triangular block row of the symmetric product.
        slice_sizes_full = list(mat.shape)
        slice_sizes_full[remaining_ax] = (i + 1) * block_size

        block_rows.append(
            product_with_transpose(
                lax.dynamic_slice(
                    mat, start_indices=start_indices, slice_sizes=slice_sizes
                ),
                lax.dynamic_slice(
                    mat, start_indices=[0] * rank, slice_sizes=slice_sizes_full
                ),
                axes=(axes, axes),
                precision=precision,
            )
        )

    return SlicedSymmetricMatrix(block_rows=block_rows)


@functools.partial(jax.jit, static_argnames=("block_size", "axes", "precision"))
def sliced_transposed_product_concat(
    mat,
    block_size,
    axes=(-1,),
    precision=lax.Precision.DEFAULT,
):
    """Returns the concatenated slices representing mat*mat^T.

    Args:
      mat: The matrix for which we will compute mat*mat^T. It does not need to be
        square, and may be batched.
      block_size: The size of row blocks to compute.
      axes: Axes to use for the contraction.
      precision: The precision to use in each computation.

    Raises:
      ValueError: Raised when the specified block size does not evenly divide
        the number of rows of the input mat.
    """
    sliced_symmetric_matrix = sliced_transposed_product(
        mat=mat, block_size=block_size, axes=axes, precision=precision
    )
    # Flatten the ragged block rows into one wide matrix along the last axis.
    return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1)


@jax.jit
def materialize_matrix(symmetric_matrix):
    """Returns a materialized symmetric matrix.

    Args:
      symmetric_matrix: the matrix represented by lower-triangular block slices.
    """
    block_rows = symmetric_matrix.block_rows
    block_size = block_rows[0].shape[-2]
    num_blocks = len(block_rows)

    # Slice the lower-triangular and diagonal blocks into blocks.
    blocks = [
        [
            block_row[Ellipsis, i * block_size : (i + 1) * block_size]
            for i in range(k + 1)
        ]
        for k, block_row in enumerate(block_rows)
    ]

    # Generate the (off-diagonal) upper-triangular blocks.
    # off_diags[i] collects, for each lower row k+1, the transpose of its i-th
    # block — i.e. the blocks that sit above the diagonal in row i.
    off_diags = [[] for _ in range(num_blocks - 1)]
    for k, block_row in enumerate(block_rows[1:]):
        for i in range(k + 1):
            off_diags[i].append(
                jnp.swapaxes(
                    a=block_row[Ellipsis, i * block_size : (i + 1) * block_size],
                    axis1=-1,
                    axis2=-2,
                )
            )

    return jnp.block(
        [row + row_t for row, row_t in zip(blocks[:-1], off_diags)] + [blocks[-1]]
    )


@functools.partial(jax.jit, static_argnames=("num_blocks"))
def materialize_matrix_from_concat(
    block_rows_concat,
    num_blocks=None,
):
    """Returns a materialized symmetric matrix from concatenated slices.

    Args:
      block_rows_concat: The matrix represented as the concatenated
        lower-triangular blocks.
      num_blocks: The number of block-rows used to represent the symmetric matrix.
        If not specified, it is inferred from the shape of block_rows_concat.
+ """ + if num_blocks is None: + num_blocks = find_num_blocks(block_rows_concat) + + block_size = block_rows_concat.shape[-2] + + block_rows = [ + block_rows_concat[ + Ellipsis, + (k * (k + 1)) + // 2 + * block_size : (((k + 1) * (k + 2)) // 2 + 1) + * block_size, + ] + for k in range(num_blocks) + ] + + return materialize_matrix(SlicedSymmetricMatrix(block_rows=block_rows)) + + +@functools.partial(jax.jit, static_argnames=("alpha", "beta", "axes")) +def update_sliced_rows( + symmetric_matrix, + mat, + alpha, + beta, + axes=(-1,), +): + """Implements the blocked equivalent of SYRK. + + Specifically, the symmetric matrix (represented using lower-triangular block + rows) is updated using the sliced product of mat. + + Args: + symmetric_matrix: The symmetric matrix to update. + mat: The matrix to use for the update = mat * mat^T. The number of rows + should match that of symmetric_matrix. + alpha: The weight for the update. + beta: The weight for the original symmetric matrix. + axes: Axes to use for the contraction of the update. + + Returns: + The updated rows of alpha * mat * mat^T + beta * symmetric_matrix. + """ + block_size = symmetric_matrix.block_rows[0].shape[-2] + sym_prod = sliced_transposed_product(mat=mat, block_size=block_size, axes=axes) + return SlicedSymmetricMatrix( + block_rows=[ + update * alpha + row * beta + for update, row in zip(sym_prod.block_rows, symmetric_matrix.block_rows) + ] + ) + + +def num_blocks_from_total_blocks(total_blocks): + """Returns the number of blocks (i.e. + + block rows) from the total blocks. + + This is the inverse of the function x -> x*(x+1)/2. + + For example, the matrix M = [[A, B^T], [B, C]] may be represented using a + total of 3 blocks ([A, B, C]). The number of corresponding block rows is 2. + + Args: + total_blocks: The total blocks used to represent the matrix. 
+ """ + num_blocks = np.round((np.sqrt(8 * total_blocks + 1) - 1) / 2).astype(np.int32) + if (num_blocks * (num_blocks + 1)) / 2 != total_blocks: + raise ValueError( + f"total_blocks={total_blocks} does not correspond to " + "a symmetric matrix. It must have the form total_blocks = x*(x+1)/2." + ) + return num_blocks + + +def find_num_blocks(block_rows_concat): + """Returns the number of (row) blocks representing the concatenated matrix. + + For example, an input with dimensions [256, 2560] represents 10 square blocks, + which matches 4 lower-triangular block rows (1+2+3+4). So this function will + return 4. + + Use ordinary numpy functions here so that the returned value is static. + + Args: + block_rows_concat: The concatenated block array. + + Raises: + ValueError: When the dimensions of the matrix do not correspond to a lower + triangular block representation. + """ + # Compute the number of square blocks used to represent the matrix. + total_blocks = block_rows_concat.shape[-1] / block_rows_concat.shape[-2] + # Determine the number of block rows by inverting y = x*(x+1)/2. + return num_blocks_from_total_blocks(total_blocks) + + +@functools.partial(jax.jit, static_argnames=("block_size")) +def slice_symmetric_matrix( + mat, + block_size, +): + """Returns sliced row blocks. + + Args: + mat: A symmetric matrix. + block_size: The size of the row slices. + """ + num_rows = mat.shape[-2] + num_cols = mat.shape[-1] + if num_rows != num_cols: + raise ValueError("mat is not square.") + if num_rows % block_size != 0: + raise ValueError( + "block size does not evenly divide rows. 
" + f"num_rows={num_rows}, block_size={block_size}" + ) + return SlicedSymmetricMatrix( + block_rows=[ + mat[ + Ellipsis, + i * block_size : (i + 1) * block_size, + 0 : (i + 1) * block_size, + ] + for i in range(num_rows // block_size) + ] + ) + + +@functools.partial(jax.jit, static_argnames=("block_size")) +def slice_symmetric_matrix_concat( + mat, + block_size, +): + """Returns the concatenated sliced row blocks. + + Args: + mat: A symmetric matrix. + block_size: The size of the row slices. + """ + sliced_symmetric_matrix = slice_symmetric_matrix(mat=mat, block_size=block_size) + return jnp.concatenate(sliced_symmetric_matrix.block_rows, axis=-1) + + +def sliced_matrix_diag(mat): + """Returns the diagonal of the symmetric matrix. + + Args: + mat: The symmetric matrix represented in concatenated block form. + """ + rows, cols = mat.shape + total_blocks = cols // rows + num_blocks = num_blocks_from_total_blocks(total_blocks) + diags = [] + for i in range(num_blocks): + last_index = rows * ((i + 2) * (i + 1)) // 2 + first_index = last_index - rows + diags.append(jnp.diag(mat[Ellipsis, first_index:last_index])) + return jnp.concatenate(diags, axis=-1) + + +def diag_as_concat(diag, block_size): + """Returns the representation of a diagonal matrix in symmetric block form. + + Args: + diag: The 1D array for the diagonals. + block_size: The size of blocks to use. Must divide the length of diag. + """ + assert len(diag.shape) == 1 # diag must be 1D. + assert len(diag) % block_size == 0 + num_diag_blocks = len(diag) // block_size + blocks = [] + for i in range(num_diag_blocks): + blocks.append(jnp.zeros(shape=(block_size, block_size * i), dtype=diag.dtype)) + blocks.append(jnp.diag(diag[i * block_size : (i + 1) * block_size])) + return jnp.concatenate(blocks, axis=-1) + + +def row_abs_maxes(mat): + """Returns the max of the absolute values of the rows of the full matrix. 
+ + For example the symmetric matrix M = [[1, 6], [6, 2]] is represented using + mat = [1, 6, 2] with block_size = 1. In this case the function returns the + absolute row maxes of the original symmetric matrix, [6, 6]. + + Args: + mat: The symmetric matrix represented as the concatenated blocks. + """ + rows, cols = mat.shape + + # Find col and row max for each block. + col_maxes = [] + row_maxes = [] + for i in range(cols // rows): + block = jnp.abs(mat[Ellipsis, i * rows : (i + 1) * rows]) + col_maxes.append(jnp.max(block, axis=1)) + row_maxes.append(jnp.max(block, axis=0)) + + # global row max from block maxes. + num_blocks = num_blocks_from_total_blocks(cols // rows) + maxes = [] + for i in range(num_blocks): + maxes.append( + jnp.concatenate( + row_maxes[(i * (i + 1) // 2) : ((i + 2) * (i + 1) // 2)] + + [ + col_maxes[((j + 1) * (j + 2)) // 2 - (j - i + 1)] + for j in range(i + 1, num_blocks) + ], + axis=-1, + ) + ) + + return jnp.max(jnp.stack(maxes), axis=0) + + +def times_vector(mat, vec): + """Returns the symmetric block-concatenated matrix multiplied by a vector. + + Specifically, each value in the vector is multiplied by a row of the full + matrix. That is, the vector is broadcast and multiplied element-wise. Note + this would be the transpose of full_mat * vec if full_mat represented the full + symmetric matrix. + + Args: + mat: The symmetric matrix represented as the concatenated blocks. + vec: The vector, having the same dimension as the materialized matrix.
+ """ + rows, cols = mat.shape + num_blocks = num_blocks_from_total_blocks(cols // rows) + multiplied = [] + for i in range(num_blocks): + mat_block = mat[ + Ellipsis, rows * ((i + 1) * i) // 2 : rows * ((i + 1) * (i + 2)) // 2 + ] + vec_block = vec[Ellipsis, rows * i : rows * (i + 1)] + multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block)) + return jnp.concatenate(multiplied, axis=-1) diff --git a/tools/train/sweep.yaml b/tools/train/sweep.yaml new file mode 100644 index 0000000000000000000000000000000000000000..360d9e596514b6a271e3fa7d5f4dc91e9740ff07 --- /dev/null +++ b/tools/train/sweep.yaml @@ -0,0 +1,49 @@ +program: train.py +project: dalle-mini +method: random +metric: + name: eval/loss + goal: minimize +parameters: + optim: + value: distributed_shampoo + learning_rate: + distribution: log_uniform + # from exp(min) to exp(max) + min: -9.2 + max: -6.9 + tokenizer_name: + value: boris/dalle-mini-tokenizer + config_name: + value: ./config/mini + dtype: + value: bfloat16 + dataset_repo_or_path: + value: ./data + per_device_train_batch_size: + value: 64 + per_device_eval_batch_size: + value: 64 + gradient_accumulation_steps: + value: 1 + warmup_steps: + value: 1000 + num_train_epochs: + value: 1 + max_train_samples: + value: 1000000 + logging_steps: + value: 40 + eval_steps: + value: 200 + +command: + - python3 + - ${program} + - "--streaming" + - "--output_dir" + - "./output" + - "--overwrite_output_dir" + - "--do_train" + - "--do_eval" + - ${args} diff --git a/tools/train/train.py b/tools/train/train.py new file mode 100644 index 0000000000000000000000000000000000000000..3e22d31d88f865d5db0d2e9fb5757d723d6f1c96 --- /dev/null +++ b/tools/train/train.py @@ -0,0 +1,1436 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright 2021-2022 The HuggingFace & DALL·E Mini team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +Training DALL·E Mini. +Script adapted from run_summarization_flax.py +""" + +import io +import logging +import os +import sys +import tempfile +import time +from dataclasses import asdict, dataclass, field +from pathlib import Path +from typing import Any, Callable, NamedTuple, Optional + +import datasets +import flax +import jax +import jax.numpy as jnp +import jaxlib +import numpy as np +import optax +import transformers +import wandb +from datasets import Dataset +from flax.core.frozen_dict import FrozenDict, freeze, unfreeze +from flax.serialization import from_bytes, to_bytes +from flax.training import train_state +from flax.training.common_utils import onehot +from jax.experimental import PartitionSpec, maps +from jax.experimental.compilation_cache import compilation_cache as cc +from jax.experimental.pjit import pjit, with_sharding_constraint +from scalable_shampoo.distributed_shampoo import GraftingType, distributed_shampoo +from tqdm import tqdm +from transformers import HfArgumentParser + +import dalle_mini +from dalle_mini.data import Dataset +from dalle_mini.model import ( + DalleBart, + DalleBartConfig, + DalleBartTokenizer, + set_partitions, +) + +try: + from google.cloud import storage +except: + storage = None + +cc.initialize_cache("./jax_cache", max_cache_size_bytes=10 * 2**30) + +logger = logging.getLogger(__name__) + + +@dataclass +class ModelArguments: + """ + Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch. 
+ """ + + model_name_or_path: Optional[str] = field( + default=None, + metadata={ + "help": "The model checkpoint for weights initialization. " + "Don't set if you want to train a model from scratch. " + "W&B artifact references are supported in addition to the sources supported by `PreTrainedModel`." + }, + ) + config_name: Optional[str] = field( + default=None, + metadata={ + "help": "Pretrained config name or path if not the same as model_name_or_path" + }, + ) + tokenizer_name: Optional[str] = field( + default=None, + metadata={ + "help": "Pretrained tokenizer name or path if not the same as model_name_or_path" + }, + ) + dtype: Optional[str] = field( + default="float32", + metadata={ + "help": "Floating-point format in which the computations will be performed (not the model weights). Choose one of `[float32, float16, bfloat16]`." + }, + ) + restore_state: Optional[bool] = field( + default=False, + metadata={ + "help": "Restore optimizer and training state. Can be True (will retrieve associated wandb artifact), a local directory or a Google bucket path." 
+ }, + ) + + def __post_init__(self): + if self.tokenizer_name is None: + self.tokenizer_name = self.model_name_or_path + assert ( + self.tokenizer_name is not None + ), "Tokenizer name or model name/path needs to be specified" + if self.restore_state: + assert self.model_name_or_path is not None and ( + "/model-" in self.model_name_or_path + ), "Restoring state only available with W&B artifact reference" + + def get_metadata(self): + if self.restore_state: + if jax.process_index() == 0: + artifact = wandb.run.use_artifact(self.model_name_or_path) + else: + artifact = wandb.Api().artifact(self.model_name_or_path) + return artifact.metadata + else: + return dict() + + def get_opt_state(self): + with tempfile.TemporaryDirectory() as tmp_dir: # avoid multiple artifact copies + if self.restore_state is True: + # wandb artifact + state_artifact = self.model_name_or_path.replace( + "/model-", "/state-", 1 + ) + if jax.process_index() == 0: + artifact = wandb.run.use_artifact(state_artifact) + else: + artifact = wandb.Api().artifact(state_artifact) + if artifact.metadata.get("bucket_path"): + # we will read directly file contents + self.restore_state = artifact.metadata["bucket_path"] + else: + artifact_dir = artifact.download(tmp_dir) + self.restore_state = str(Path(artifact_dir) / "opt_state.msgpack") + + if self.restore_state.startswith("gs://"): + bucket_path = Path(self.restore_state[5:]) / "opt_state.msgpack" + bucket, blob_name = str(bucket_path).split("/", 1) + assert ( + storage is not None + ), 'Could not find google.storage. Install with "pip install google-cloud-storage"' + client = storage.Client() + bucket = client.bucket(bucket) + blob = bucket.blob(blob_name) + return blob.download_as_bytes() + + with Path(self.restore_state).open("rb") as f: + return f.read() + + +@dataclass +class DataTrainingArguments: + """ + Arguments pertaining to what data we are going to input our model for training and eval. 
+ """ + + text_column: Optional[str] = field( + default="caption", + metadata={ + "help": "The name of the column in the datasets containing the full texts (for summarization)." + }, + ) + encoding_column: Optional[str] = field( + default="encoding", + metadata={ + "help": "The name of the column in the datasets containing the image encodings." + }, + ) + dataset_repo_or_path: str = field( + default=None, + metadata={"help": "The dataset repository containing encoded files."}, + ) + train_file: Optional[str] = field( + default=None, + metadata={ + "help": "The input training data file (glob & braceexpand acceptable)." + }, + ) + validation_file: Optional[str] = field( + default=None, + metadata={ + "help": "An optional input evaluation data file (glob & braceexpand acceptable)." + }, + ) + # data loading should not be a bottleneck so we use "streaming" mode by default + streaming: Optional[bool] = field( + default=True, + metadata={"help": "Whether to stream the dataset."}, + ) + use_auth_token: Optional[bool] = field( + default=False, + metadata={ + "help": "Whether to use the authentication token for private datasets." + }, + ) + shard_by_host: Optional[bool] = field( + default=False, + metadata={ + "help": "Whether to shard data files by host in multi-host environments." + }, + ) + blank_caption_prob: Optional[float] = field( + default=0.0, + metadata={ + "help": "Probability of removing some captions for classifier-free guidance." 
+ }, + ) + clip_score_column: Optional[str] = field( + default="clip_score", + metadata={"help": "Column that containts clip score for filtering."}, + ) + min_clip_score: Optional[float] = field( + default=None, + metadata={"help": "Minimum clip score required."}, + ) + max_clip_score: Optional[float] = field( + default=None, + metadata={"help": "Maximum clip score required."}, + ) + filter_column: Optional[str] = field( + default=None, + metadata={"help": "Column that containts classes to be filtered."}, + ) + filter_value: Optional[str] = field( + default=None, + metadata={"help": "Class value to be kept during filtering."}, + ) + max_train_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of training examples." + }, + ) + max_eval_samples: Optional[int] = field( + default=None, + metadata={ + "help": "For debugging purposes or quicker training, truncate the number of evaluation examples." + }, + ) + preprocessing_num_workers: Optional[int] = field( + default=None, + metadata={ + "help": "The number of processes to use for the preprocessing. Not used in streaming mode." + }, + ) + overwrite_cache: bool = field( + default=False, + metadata={ + "help": "Overwrite the cached training and evaluation sets. Not used in streaming mode." + }, + ) + # default seed of None ensures we don't repeat the same items if script was interrupted during an epoch + seed_dataset: int = field( + default=None, + metadata={ + "help": "Random seed for the dataset that will be set at the beginning of training." + }, + ) + + def __post_init__(self): + if self.dataset_repo_or_path is None: + raise ValueError("Need a dataset repository or path.") + + +@dataclass +class TrainingArguments: + """ + Arguments pertaining to training parameters. + """ + + output_dir: str = field( + metadata={ + "help": "The output directory where the model predictions and checkpoints will be written." 
+ }, + ) + overwrite_output_dir: bool = field( + default=False, + metadata={ + "help": ( + "Overwrite the content of the output directory. " + "Use this to continue training if output_dir points to a checkpoint directory." + ) + }, + ) + + do_train: bool = field(default=False, metadata={"help": "Whether to run training."}) + do_eval: bool = field( + default=False, metadata={"help": "Whether to run eval on the validation set."} + ) + + per_device_train_batch_size: int = field( + default=8, + metadata={"help": "Batch size per data parallel device for training."}, + ) + per_device_eval_batch_size: Optional[int] = field( + default=None, + metadata={ + "help": "Batch size per data parallel device for evaluation. Same as training batch size if not set." + }, + ) + + gradient_accumulation_steps: int = field( + default=1, + metadata={ + "help": "Number of updates steps to accumulate before performing an update pass." + }, + ) + gradient_checkpointing: bool = field( + default=False, metadata={"help": "Use gradient checkpointing."} + ) + + learning_rate: float = field( + default=5e-5, metadata={"help": "The initial learning rate."} + ) + optim: str = field( + default="distributed_shampoo", + metadata={ + "help": 'The optimizer to use. 
Can be "distributed_shampoo" (default), "adam" or "adafactor"' + }, + ) + beta1: float = field( + default=0.9, + metadata={"help": "Beta1 for Adam & Distributed Shampoo."}, + ) + beta2: float = field( + default=0.999, + metadata={"help": "Beta2 for for Adam & Distributed Shampoo."}, + ) + adam_epsilon: float = field( + default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."} + ) + max_grad_norm: float = field( + default=1.0, metadata={"help": "Max gradient norm for Adafactor."} + ) + block_size: int = field( + default=1024, + metadata={"help": "Chunked size for large layers with Distributed Shampoo."}, + ) + preconditioning_compute_steps: int = field( + default=10, metadata={"help": "Number of steps to update preconditioner."} + ) + skip_preconditioning_dim_size_gt: int = field( + default=4096, + metadata={"help": "Max size for preconditioning with Distributed Shampoo."}, + ) + graft_type: str = field( + default="rmsprop_normalized", + metadata={ + "help": "The type of grafting to use. Can be 'rmsprop_normalized' (default), 'rmsprop', 'adagrad', 'adagrad_normalized', 'sgd' or 'sqrt_n'" + }, + ) + optim_quantized: bool = field( + default=False, + metadata={ + "help": "Whether to quantize optimizer (only supported with Distributed Shampoo)." + }, + ) + + num_train_epochs: int = field( + default=3, metadata={"help": "Total number of training epochs to perform."} + ) + + warmup_steps: int = field( + default=0, metadata={"help": "Linear warmup over warmup_steps."} + ) + lr_decay: str = field( + default=None, + metadata={ + "help": "Decay to be used in the learning rate scheduler. Can be None (default), linear or exponential." + }, + ) + lr_transition_steps: int = field( + default=None, + metadata={ + "help": "Number of transition steps associated with learning rate decay when using exponential decay." + }, + ) + lr_decay_rate: float = field( + default=None, + metadata={ + "help": "Decay rate associated with learning rate when using exponential decay." 
+ }, + ) + lr_staircase: bool = field( + default=False, + metadata={ + "help": "Whether to use staircase or continuous learning rate when using exponential decay." + }, + ) + + logging_steps: int = field( + default=40, metadata={"help": "Log every X updates steps."} + ) + eval_steps: int = field( + default=400, metadata={"help": "Run an evaluation every X steps."} + ) + save_steps: int = field( + default=4000, metadata={"help": "Save checkpoint every X updates steps."} + ) + log_model: bool = field( + default=False, + metadata={"help": "Log model to wandb at `save_steps` frequency."}, + ) + log_norm_steps: int = field( + default=True, + metadata={"help": "Log parameters and gradients norm at this frequency."}, + ) + log_histogram_steps: int = field( + default=False, + metadata={ + "help": "Log parameters and gradients histograms at this frequency. Slows down training." + }, + ) + + seed_model: int = field( + default=42, + metadata={ + "help": "Random seed for the model that will be set at the beginning of training." + }, + ) + + wandb_entity: Optional[str] = field( + default=None, + metadata={"help": "The wandb entity to use (for teams)."}, + ) + wandb_project: str = field( + default="dalle-mini", + metadata={"help": "The name of the wandb project."}, + ) + wandb_job_type: str = field( + default="Seq2Seq", + metadata={"help": "The name of the wandb job type."}, + ) + + assert_TPU_available: bool = field( + default=False, + metadata={"help": "Verify that TPU is not in use."}, + ) + + mp_devices: Optional[int] = field( + default=1, + metadata={ + "help": "Number of devices required for model parallelism. The other dimension of available devices is used for data parallelism." 
+ }, + ) + + dp_devices: int = field(init=False) + + def __post_init__(self): + if self.assert_TPU_available: + assert ( + jax.local_device_count() == 8 + ), "TPUs in use, please check running processes" + if self.output_dir.startswith("gs://"): + assert ( + storage is not None + ), 'Could not find google.storage. Install with "pip install google-cloud-storage"' + assert self.optim in [ + "distributed_shampoo", + "adam", + "adafactor", + ], f"Selected optimizer not supported: {self.optim}" + assert self.graft_type in [ + "rmsprop_normalized", + "rmsprop", + "adagrad", + "adagrad_normalized", + "sgd", + "sqrt_n", + ], f"Selected graft type not supported: {self.graft_type}" + assert self.lr_decay in [ + None, + "linear", + "exponential", + ], f"Selected learning rate decay not supported: {self.lr_decay}" + if self.per_device_eval_batch_size is None: + self.per_device_eval_batch_size = self.per_device_train_batch_size + if self.log_norm_steps is True: + self.log_norm_steps = self.logging_steps + if ( + os.path.exists(self.output_dir) + and os.listdir(self.output_dir) + and self.do_train + and not self.overwrite_output_dir + ): + raise ValueError( + f"Output directory ({self.output_dir}) already exists and is not empty." + "Use --overwrite_output_dir to overcome." + ) + assert ( + self.mp_devices > 0 + ), f"Number of devices for model parallelism must be > 0" + assert ( + jax.device_count() % self.mp_devices == 0 + ), f"Number of available devices ({jax.device_count()} must be divisible by number of devices used for model parallelism ({self.mp_devices})." + self.dp_devices = jax.device_count() // self.mp_devices + + +class TrainState(train_state.TrainState): + dropout_rng: jnp.ndarray = None + epoch: int = 0 + train_time: float = 0.0 # total time the model trained + train_samples: int = 0 # number of samples seen + + +def main(): + # See all possible arguments by passing the --help flag to this script. 
+ parser = HfArgumentParser( + (ModelArguments, DataTrainingArguments, TrainingArguments) + ) + if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): + # If we pass only one argument to the script and it's the path to a json file, + # let's parse it to get our arguments. + model_args, data_args, training_args = parser.parse_json_file( + json_file=os.path.abspath(sys.argv[1]) + ) + else: + model_args, data_args, training_args = parser.parse_args_into_dataclasses() + + # Make one log on every process with the configuration for debugging. + logging.basicConfig( + format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", + datefmt="%m/%d/%Y %H:%M:%S", + level=logging.INFO, + ) + # Setup logging, we only want one process per machine to log things on the screen. + logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR) + if jax.process_index() == 0: + datasets.utils.logging.set_verbosity_warning() + transformers.utils.logging.set_verbosity_info() + else: + datasets.utils.logging.set_verbosity_error() + transformers.utils.logging.set_verbosity_error() + + # Set the verbosity to info of the Transformers logger (on main process only): + logger.info(f"Training/evaluation parameters {training_args}") + + # Load dataset + dataset = Dataset( + **asdict(data_args), + do_train=training_args.do_train, + do_eval=training_args.do_eval, + ) + + logger.info(f"Local TPUs: {jax.local_device_count()}") + logger.info(f"Global TPUs: {jax.device_count()}") + + # Set up wandb run + if jax.process_index() == 0: + wandb.init( + entity=training_args.wandb_entity, + project=training_args.wandb_project, + job_type=training_args.wandb_job_type, + config=parser.parse_args(), + ) + + # Set up our new model config + if model_args.config_name: + config = DalleBartConfig.from_pretrained(model_args.config_name) + config.gradient_checkpointing = training_args.gradient_checkpointing + else: + config = None + + # Load or create new model + if model_args.model_name_or_path: + 
model = DalleBart.from_pretrained( + model_args.model_name_or_path, + config=config, + seed=training_args.seed_model, + dtype=getattr(jnp, model_args.dtype), + abstract_init=True, # we overwrite them with loaded checkpoint + gradient_checkpointing=training_args.gradient_checkpointing, + ) + else: + model = DalleBart( + config, + seed=training_args.seed_model, + dtype=getattr(jnp, model_args.dtype), + abstract_init=True, + ) + + # get model metadata + model_metadata = model_args.get_metadata() + + # get PartitionSpec for model params (required to be a dict) + param_spec = set_partitions(model.params) + + # convert params to frozen dict + model._params = freeze(model.params) + + # Load tokenizer + tokenizer = DalleBartTokenizer.from_pretrained( + model_args.tokenizer_name, use_fast=True + ) + + # Preprocessing the datasets. + # We need to normalize and tokenize inputs and targets. + dataset.preprocess(tokenizer=tokenizer, config=model.config) + + # Initialize our training + dropout_rng = jax.random.PRNGKey(training_args.seed_model) + + # Store some constant + num_epochs = training_args.num_train_epochs + # batch size + batch_size_per_node_per_grad_step = ( + training_args.per_device_train_batch_size + * jax.local_device_count() + // training_args.mp_devices + ) + batch_size_per_node = ( + batch_size_per_node_per_grad_step * training_args.gradient_accumulation_steps + ) + batch_size_per_step = batch_size_per_node * jax.process_count() + eval_batch_size_per_node = ( + training_args.per_device_eval_batch_size + * jax.local_device_count() + // training_args.mp_devices + ) + eval_batch_size_per_step = eval_batch_size_per_node * jax.process_count() + len_train_dataset, len_eval_dataset = dataset.length + steps_per_epoch = ( + len_train_dataset // batch_size_per_node + if len_train_dataset is not None + else None + ) + num_train_steps = ( + steps_per_epoch * num_epochs if steps_per_epoch is not None else None + ) + num_params = model.num_params + + logger.info("***** 
Running training *****") + logger.info(f" Num examples = {len_train_dataset}") + logger.info(f" Num Epochs = {num_epochs}") + logger.info( + f" Batch size per dp device = {training_args.per_device_train_batch_size}" + ) + logger.info(f" Number of devices = {jax.device_count()}") + logger.info( + f" Gradient accumulation steps = {training_args.gradient_accumulation_steps}" + ) + logger.info(f" Batch size per update = {batch_size_per_step}") + logger.info(f" Model parameters = {num_params:,}") + + # set up wandb run + if jax.process_index() == 0: + # set default x-axis as 'train/step' + wandb.define_metric("*", step_metric="train/step") + + # add interesting config parameters + wandb.config.update( + { + "len_train_dataset": len_train_dataset, + "len_eval_dataset": len_eval_dataset, + "batch_size_per_step": batch_size_per_step, + "num_params": num_params, + "model_config": model.config.to_dict(), + "num_devices": jax.device_count(), + "versions": { + "jax": jax.__version__, + "jaxlib": jaxlib.__version__, + "flax": flax.__version__, + "transformers": transformers.__version__, + "datasets": datasets.__version__, + "wandb": wandb.__version__, + "dalle_mini": dalle_mini.__version__, + }, + } + ) + + # Create learning rate schedule + def create_learning_rate_fn() -> Callable[[int], jnp.array]: + """Create the learning rate function.""" + warmup_fn = optax.linear_schedule( + init_value=0.0, + end_value=training_args.learning_rate, + transition_steps=training_args.warmup_steps + 1, # ensure not 0 + ) + # offset step when resuming + if model_metadata.get("step", 0): + warmup_fn = optax.join_schedules( + schedules=[optax.constant_schedule(0.0), warmup_fn], + boundaries=[model_metadata["step"]], + ) + if training_args.lr_decay is None: + return warmup_fn + elif training_args.lr_decay == "linear": + assert ( + num_train_steps is not None + ), "linear decay requires knowing the dataset length" + decay_fn = optax.linear_schedule( + init_value=training_args.learning_rate, + 
end_value=0, + transition_steps=num_train_steps - training_args.warmup_steps, + ) + elif training_args.lr_decay == "exponential": + decay_fn = optax.exponential_decay( + init_value=training_args.learning_rate, + transition_steps=training_args.lr_transition_steps, + decay_rate=training_args.lr_decay_rate, + staircase=training_args.lr_staircase, + ) + schedule_fn = optax.join_schedules( + schedules=[warmup_fn, decay_fn], + boundaries=[model_metadata.get("step", 0) + training_args.warmup_steps], + ) + return schedule_fn + + learning_rate_fn = create_learning_rate_fn() + + # create adam optimizer + if training_args.optim == "distributed_shampoo": + # parameters from https://github.com/tensorflow/lingvo/blob/03ee9d7cd50764b0424c7c863733c91fc0b053ec/lingvo/jax/optimizers.py#L729 + graft_type = { + "sgd": GraftingType.SGD, + "adagrad": GraftingType.ADAGRAD, + "rmsprop": GraftingType.RMSPROP, + "rmsprop_normalized": GraftingType.RMSPROP_NORMALIZED, + "sqrt_n": GraftingType.SQRT_N, + "adagrad_normalized": GraftingType.ADAGRAD_NORMALIZED, + }[training_args.graft_type] + optimizer = distributed_shampoo( + learning_rate_fn, + block_size=training_args.block_size, + beta1=training_args.beta1, + beta2=training_args.beta2, + diagonal_epsilon=1e-10, + matrix_epsilon=1e-6, + start_preconditioning_step=max( + training_args.preconditioning_compute_steps + 1, 101 + ), + preconditioning_compute_steps=training_args.preconditioning_compute_steps, + statistics_compute_steps=1, + best_effort_shape_interpretation=True, + graft_type=graft_type, + nesterov=False, + exponent_override=0, + statistics_partition_spec=PartitionSpec(None, "dp", None), + preconditioner_partition_spec=PartitionSpec("dp", None, None), + num_devices_for_pjit=training_args.dp_devices, + shard_optimizer_states=True, + inverse_failure_threshold=0.1, + moving_average_for_momentum=True, + skip_preconditioning_dim_size_gt=training_args.skip_preconditioning_dim_size_gt, + clip_by_scaled_gradient_norm=None, + 
precision=jax.lax.Precision.HIGHEST, + best_effort_memory_usage_reduction=training_args.optim_quantized, + ) + # get the real optimizer and helper functions + update_fn = optimizer.update + optimizer = optimizer.init(model.params) + opt_fn = NamedTuple("opt_fn", pspec_fn=Any, shape_and_dtype_fn=Any)( + optimizer.pspec_fn, optimizer.shape_and_dtype_fn + ) + optimizer = optax.GradientTransformation(optimizer.init_fn, update_fn) + + elif training_args.optim == "adam": + optimizer = optax.adamw( + learning_rate=learning_rate_fn, + b1=training_args.beta1, + b2=training_args.beta2, + eps=training_args.adam_epsilon, + ) + elif training_args.optim == "adafactor": + # We use the default parameters here to initialize adafactor, + # For more details about the parameters please check https://github.com/deepmind/optax/blob/ed02befef9bf81cbbf236be3d2b0e032e9ed4a40/optax/_src/alias.py#L74 + optimizer = optax.adafactor( + learning_rate=learning_rate_fn, + clipping_threshold=training_args.max_grad_norm, + ) + + # get PartitionSpec for optimizer state + def get_opt_state_spec_and_shape(param_spec): + # get opt_state shape without actual init + opt_state_shape = jax.eval_shape(optimizer.init, model.params) + + if training_args.optim == "adam": + + def _opt_state_spec_per_leaf(x): + if isinstance(x, FrozenDict): + # variables with same structure as params + return param_spec + else: + # other variables such as count + return None + + opt_state_spec = jax.tree_map( + _opt_state_spec_per_leaf, + opt_state_shape, + # return None spec for empty elements + is_leaf=lambda x: isinstance(x, (FrozenDict, optax.EmptyState)), + ) + + elif training_args.optim == "adafactor": + # factorized state must be replicated (rank different than params) + opt_state_spec = None + + elif training_args.optim == "distributed_shampoo": + opt_state_spec = opt_fn.pspec_fn( + params=model.params, + params_partition_spec=param_spec, + partition_spec_for_statistics=PartitionSpec(None, "dp", None), + ) + else: + raise 
NotImplementedError + return opt_state_spec, opt_state_shape + + opt_state_spec, opt_state_shape = get_opt_state_spec_and_shape(param_spec) + + # create a mesh + mesh_shape = (training_args.dp_devices, training_args.mp_devices) + devices = np.asarray(jax.devices()).reshape(*mesh_shape) + mesh = maps.Mesh(devices, ("dp", "mp")) + logger.info(f" Mesh shape: {mesh_shape}") + + # define state spec + state_spec = TrainState( + params=param_spec, + opt_state=opt_state_spec, + dropout_rng=None, + step=None, + epoch=None, + train_time=None, + train_samples=None, + apply_fn=model.__call__, + tx=optimizer, + ) + + # init params if not available yet + def maybe_init_params(params): + if model_args.model_name_or_path: + # model params are correctly loaded + return params + else: + # params have not been initialized yet + return model.init_weights() + + with mesh: + logger.info(" Creating state") + if not model_args.restore_state: + + def init_state(params): + return TrainState.create( + apply_fn=model.__call__, + tx=optimizer, + params=maybe_init_params(params), + dropout_rng=dropout_rng, + ) + + state = pjit( + init_state, + in_axis_resources=(param_spec,) + if model_args.model_name_or_path + else None, + out_axis_resources=state_spec, + donate_argnums=(0,), + )(model.params if model_args.model_name_or_path else None) + + else: + # load opt_state + opt_state = from_bytes(opt_state_shape, model_args.get_opt_state()) + + # restore other attributes + attr_state = { + k: model_metadata[k] + for k in ["step", "epoch", "train_time", "train_samples"] + } + + def restore_state(params, opt_state): + return TrainState( + apply_fn=model.__call__, + tx=optimizer, + params=params, + opt_state=opt_state, + dropout_rng=dropout_rng, + **attr_state, + ) + + state = pjit( + restore_state, + in_axis_resources=( + param_spec, + opt_state_spec, + ), + out_axis_resources=state_spec, + donate_argnums=(0, 1), + )(model.params, opt_state) + + # remove opt_state from CPU + del opt_state + + # free CPU 
memory + del model._params, opt_state_spec, opt_state_shape + + # define batch specs + batch_spec = PartitionSpec("dp") + grad_batch_spec = PartitionSpec(None, "dp") + + # define loss + def loss_fn(logits, labels): + loss = optax.softmax_cross_entropy(logits, onehot(labels, logits.shape[-1])) + loss = loss.mean() + return loss + + # "vmap trick" avoids a crash when mp_devices > 1 (not sure why it happens) + # lead to better perf: see https://wandb.ai/dalle-mini/dalle-mini/reports/JAX-pmap-vs-pjit--VmlldzoxNDg1ODA2 + use_vmap_trick = True + + # make grad_param_spec for vmap + if use_vmap_trick: + grad_param_spec = jax.tree_map( + lambda x: PartitionSpec(*("dp",) + (x if x is not None else (None,))), + param_spec, + ) + + # Define gradient update step fn + def train_step(state, batch, train_time): + + # get a minibatch (one gradient accumulation slice) + def get_minibatch(batch, grad_idx): + return jax.tree_map( + lambda x: jax.lax.dynamic_index_in_dim(x, grad_idx, keepdims=False), + batch, + ) + + def compute_loss(params, minibatch, dropout_rng): + # minibatch has dim (batch_size, ...) 
+ minibatch, labels = minibatch.pop("labels") + logits = state.apply_fn( + **minibatch, params=params, dropout_rng=dropout_rng, train=True + )[0] + return loss_fn(logits, labels) + + grad_fn = jax.value_and_grad(compute_loss) + + def loss_and_grad(grad_idx, dropout_rng): + # minibatch at grad_idx for gradient accumulation (None otherwise) + minibatch = ( + get_minibatch(batch, grad_idx) if grad_idx is not None else batch + ) + # ensure it is sharded properly + minibatch = with_sharding_constraint(minibatch, batch_spec) + # only 1 single rng per grad step, let us handle larger batch size (not sure why) + dropout_rng, _ = jax.random.split(dropout_rng) + + if use_vmap_trick: + # "vmap trick", calculate loss and grads independently per dp_device + loss, grads = jax.vmap( + grad_fn, in_axes=(None, 0, None), out_axes=(0, 0) + )(state.params, minibatch, dropout_rng) + # ensure they are sharded correctly + loss = with_sharding_constraint(loss, batch_spec) + grads = with_sharding_constraint(grads, grad_param_spec) + # average across all devices + # Note: we could average per device only after gradient accumulation, right before params update + loss, grads = jax.tree_map(lambda x: jnp.mean(x, axis=0), (loss, grads)) + else: + # "vmap trick" does not work in multi-hosts and requires too much hbm + loss, grads = grad_fn(state.params, minibatch, dropout_rng) + # ensure grads are sharded + grads = with_sharding_constraint(grads, param_spec) + # return loss and grads + return loss, grads, dropout_rng + + if training_args.gradient_accumulation_steps == 1: + loss, grads, dropout_rng = loss_and_grad(None, state.dropout_rng) + else: + # create initial state for cumul_minibatch_step loop + init_minibatch_step = ( + 0.0, + with_sharding_constraint( + jax.tree_map(jnp.zeros_like, state.params), param_spec + ), + state.dropout_rng, + ) + + # accumulate gradients + def cumul_minibatch_step(grad_idx, cumul_loss_grad_dropout): + cumul_loss, cumul_grads, dropout_rng = cumul_loss_grad_dropout 
+ loss, grads, dropout_rng = loss_and_grad(grad_idx, dropout_rng) + cumul_loss, cumul_grads = jax.tree_map( + jnp.add, (cumul_loss, cumul_grads), (loss, grads) + ) + cumul_grads = with_sharding_constraint(cumul_grads, param_spec) + return cumul_loss, cumul_grads, dropout_rng + + # loop over gradients + loss, grads, dropout_rng = jax.lax.fori_loop( + 0, + training_args.gradient_accumulation_steps, + cumul_minibatch_step, + init_minibatch_step, + ) + grads = with_sharding_constraint(grads, param_spec) + # sum -> mean + loss, grads = jax.tree_map( + lambda x: x / training_args.gradient_accumulation_steps, (loss, grads) + ) + + grads = with_sharding_constraint(grads, param_spec) + + # update state + state = state.apply_gradients( + grads=grads, + dropout_rng=dropout_rng, + train_time=train_time, + train_samples=state.train_samples + batch_size_per_step, + ) + + metrics = { + "loss": loss, + "learning_rate": learning_rate_fn(state.step), + } + + def maybe_fn(fn, val, zeros, freq): + """Call fn only if it is a logging step""" + return jax.lax.cond( + state.step % freq == 0, + fn, + lambda _: zeros, + val, + ) + + if training_args.log_norm_steps: + zeros_norm = jax.tree_map(lambda _: jnp.float32(0), state.params) + + def norm(val): + return jax.tree_map(lambda x: jnp.linalg.norm(x), val) + + gradients_norm = maybe_fn( + norm, grads, zeros_norm, training_args.log_norm_steps + ) + params_norm = maybe_fn( + norm, state.params, zeros_norm, training_args.log_norm_steps + ) + + metrics.update( + { + "gradients_norm": gradients_norm, + "params_norm": params_norm, + } + ) + + if training_args.log_histogram_steps: + zeros_hist = jax.tree_map( + lambda _: jnp.histogram(jnp.zeros(1), density=True), state.params + ) + + def histogram(val): + return jax.tree_map(lambda x: jnp.histogram(x, density=True), val) + + gradients_hist = maybe_fn( + histogram, grads, zeros_hist, training_args.log_histogram_steps + ) + params_hist = maybe_fn( + histogram, state.params, zeros_hist, 
training_args.log_histogram_steps + ) + + metrics.update( + { + "params_hist": params_hist, + "gradients_hist": gradients_hist, + } + ) + + return state, metrics + + # Define eval fn + def eval_step(state, batch): + def compute_eval_loss(batch): + batch, labels = batch.pop("labels") + logits = model(**batch, params=state.params, train=False)[0] + return loss_fn(logits, labels) + + if use_vmap_trick: + loss = jax.vmap(compute_eval_loss)(batch) + # ensure they are sharded correctly + loss = with_sharding_constraint(loss, batch_spec) + # average across all devices + loss = jnp.mean(loss) + else: + loss = compute_eval_loss(batch) + + return loss + + # Create parallel version of the train and eval step + p_train_step = pjit( + train_step, + in_axis_resources=( + state_spec, + grad_batch_spec + if training_args.gradient_accumulation_steps > 1 + else batch_spec, + None, + ), + out_axis_resources=(state_spec, None), + donate_argnums=(0,), + ) + p_eval_step = pjit( + eval_step, + in_axis_resources=(state_spec, batch_spec), + out_axis_resources=None, + ) + + # define metrics logger + class MetricsLogger: + def __init__(self, step): + # keep state + self.state_dict = {} + # estimate speed + self.step = step + self.time = time.perf_counter() + self.offset_time = 0.0 + + def update_state_metrics(self, state): + """Update internal state metrics (logged at each call to be used as x-axis)""" + self.state_dict = { + f'train/{k.split("_")[-1]}': state[k] + for k in ["step", "epoch", "train_time", "train_samples"] + } + # timing metrics + new_step = int(state["step"]) + new_time = time.perf_counter() + if new_step > self.step: + # remove time for eval & save + delta_time = new_time - self.time - self.offset_time + self.offset_time = 0 + time_per_step = delta_time / (new_step - self.step) + self.step = new_step + self.time = new_time + self.log_time("train_per_step", time_per_step, offset=False) + self.log_time("train_per_log", delta_time, offset=False) + + def log_time(self, key, 
duration, offset=True): + wandb.log({f"time/{key}": duration, **self.state_dict}) + if offset: + self.offset_time += duration + + def log(self, metrics, prefix=None): + if jax.process_index() == 0: + log_metrics = {} + for k, v in metrics.items(): + if "_norm" in k: + if self.step % training_args.log_norm_steps == 0: + log_metrics[f"{k}/"] = unfreeze(v) + elif "_hist" in k: + if self.step % training_args.log_histogram_steps == 0: + v = jax.tree_map(lambda x: jax.device_get(x), unfreeze(v)) + v = jax.tree_map( + lambda x: wandb.Histogram(np_histogram=x), + v, + is_leaf=lambda x: isinstance(x, tuple), + ) + log_metrics[f"{k}/"] = v + else: + if prefix is not None: + k = f"{prefix}/{k}" + log_metrics[k] = v + wandb.log({**log_metrics, **self.state_dict}) + + # keep local copy of state + local_state = { + k: jax.device_get(getattr(state, k)).item() + for k in ["step", "epoch", "train_time", "train_samples"] + } + # init variables + start_time = time.perf_counter() - local_state["train_time"] + train_metrics = None + metrics_logger = MetricsLogger(local_state["step"]) + epochs = tqdm( + range(local_state["epoch"], num_epochs), + desc=f"Epoch ... 
(1/{num_epochs})", + position=0, + disable=jax.process_index() > 0, + ) + + def run_evaluation(): + # ======================== Evaluating ============================== + if training_args.do_eval: + start_eval_time = time.perf_counter() + eval_loader = dataset.dataloader("eval", eval_batch_size_per_step) + eval_steps = ( + len_eval_dataset // eval_batch_size_per_step + if len_eval_dataset is not None + else None + ) + eval_loss = [] + for batch in tqdm( + eval_loader, + desc="Evaluating...", + position=2, + leave=False, + total=eval_steps, + disable=jax.process_index() > 0, + ): + # need to keep only eval_batch_size_per_node items relevant to the node + batch = jax.tree_map( + lambda x: x.reshape( + (jax.process_count(), eval_batch_size_per_node) + x.shape[1:] + ), + batch, + ) + batch = jax.tree_map(lambda x: x[jax.process_index()], batch) + + # add dp dimension when using "vmap trick" + if use_vmap_trick: + bs_shape = ( + jax.local_device_count() // training_args.mp_devices, + training_args.per_device_eval_batch_size, + ) + batch = jax.tree_map( + lambda x: x.reshape(bs_shape + x.shape[1:]), batch + ) + + # freeze batch to pass safely to jax transforms + batch = freeze(batch) + # accumulate losses async + eval_loss.append(p_eval_step(state, batch)) + + # get the mean of the loss + eval_loss = jnp.stack(eval_loss) + eval_loss = jnp.mean(eval_loss) + eval_metrics = {"loss": eval_loss} + + # log metrics + metrics_logger.log(eval_metrics, prefix="eval") + metrics_logger.log_time("eval", time.perf_counter() - start_eval_time) + + # Print metrics and update progress bar + desc = f"Epoch... 
({epoch + 1}/{num_epochs} | Eval Loss: {eval_metrics['loss']})" + epochs.write(desc) + epochs.desc = desc + + return eval_metrics + + def run_save_model(state, eval_metrics=None): + if jax.process_index() == 0: + + start_save_time = time.perf_counter() + output_dir = training_args.output_dir + use_bucket = output_dir.startswith("gs://") + if use_bucket: + bucket_path = Path(output_dir[5:]) / wandb.run.id / f"step_{state.step}" + bucket, dir_path = str(bucket_path).split("/", 1) + tmp_dir = tempfile.TemporaryDirectory() + output_dir = tmp_dir.name + + # save model + params = jax.device_get(state.params) + model.save_pretrained( + output_dir, + params=params, + ) + + # save tokenizer + tokenizer.save_pretrained(output_dir) + + # copy to bucket + if use_bucket: + client = storage.Client() + bucket = client.bucket(bucket) + for filename in Path(output_dir).glob("*"): + blob_name = str(Path(dir_path) / "model" / filename.name) + blob = bucket.blob(blob_name) + blob.upload_from_filename(str(filename)) + tmp_dir.cleanup() + + # save state + opt_state = jax.device_get(state.opt_state) + if use_bucket: + blob_name = str(Path(dir_path) / "state" / "opt_state.msgpack") + blob = bucket.blob(blob_name) + blob.upload_from_file(io.BytesIO(to_bytes(opt_state))) + else: + with (Path(output_dir) / "opt_state.msgpack").open("wb") as f: + f.write(to_bytes(opt_state)) + + # save to W&B + if training_args.log_model: + # save some space + c = wandb.wandb_sdk.wandb_artifacts.get_artifacts_cache() + c.cleanup(wandb.util.from_human_size("20GB")) + + metadata = { + k: jax.device_get(getattr(state, k)).item() + for k in ["step", "epoch", "train_time", "train_samples"] + } + metadata["num_params"] = num_params + if eval_metrics is not None: + metadata["eval"] = eval_metrics + + # create model artifact + if use_bucket: + metadata["bucket_path"] = f"gs://{bucket_path}/model" + artifact = wandb.Artifact( + name=f"model-{wandb.run.id}", + type="DalleBart_model", + metadata=metadata, + ) + if 
use_bucket: + artifact.add_reference(metadata["bucket_path"]) + else: + for filename in [ + "config.json", + "flax_model.msgpack", + "merges.txt", + "special_tokens_map.json", + "tokenizer.json", + "tokenizer_config.json", + "vocab.json", + ]: + artifact.add_file( + f"{Path(training_args.output_dir) / filename}" + ) + wandb.run.log_artifact(artifact) + + # create state artifact + if use_bucket: + metadata["bucket_path"] = f"gs://{bucket_path}/state" + artifact_state = wandb.Artifact( + name=f"state-{wandb.run.id}", + type="DalleBart_state", + metadata=metadata, + ) + if use_bucket: + artifact_state.add_reference(metadata["bucket_path"]) + else: + artifact_state.add_file( + f"{Path(training_args.output_dir) / 'opt_state.msgpack'}" + ) + wandb.run.log_artifact(artifact_state) + metrics_logger.log_time("save_model", time.perf_counter() - start_save_time) + + logger.info(" Ready to start training") + with mesh: + for epoch in epochs: + state.replace(epoch=epoch) + local_state["epoch"] = epoch + # ======================== Training ================================ + metrics_logger.update_state_metrics(local_state) + metrics_logger.log({}) + + # Generate an epoch by shuffling sampling indices from the train dataset + train_loader = dataset.dataloader( + "train", + batch_size_per_node, + epoch, + ) + # train + for batch in tqdm( + train_loader, + desc="Training...", + position=1, + leave=False, + total=steps_per_epoch, + disable=jax.process_index() > 0, + ): + # calculate delta time (we have a lag of one step but it's ok) + train_time = time.perf_counter() - start_time + + # set correct shape to batch + # - add grad_step dim if gradient_accumulation_steps > 1 + # - split per dp device if not multi-host for vmap trick (does not work in multi-host) + bs_shape = ( + (batch_size_per_node_per_grad_step,) + if not use_vmap_trick + else ( + jax.local_device_count() + // training_args.mp_devices, # local dp devices + training_args.per_device_train_batch_size, + ) + ) + if 
training_args.gradient_accumulation_steps > 1: + # reshape data into (gradient_accumulation_steps, batch_per_node, ...) + # to avoid any data redistribution when sharding + bs_shape = (training_args.gradient_accumulation_steps,) + bs_shape + + # reshape batch + batch = jax.tree_map( + lambda x: x.reshape(bs_shape + x.shape[1:]), + batch, + ) + # freeze batch to pass safely to jax transforms + batch = freeze(batch) + + # train step + state, train_metrics = p_train_step(state, batch, train_time) + local_state["step"] += 1 + local_state["train_time"] = train_time + local_state["train_samples"] += batch_size_per_step + + if ( + local_state["step"] % training_args.logging_steps == 0 + and jax.process_index() == 0 + ): + metrics_logger.update_state_metrics(local_state) + metrics_logger.log(train_metrics, prefix="train") + + eval_metrics = None + if local_state["step"] % training_args.eval_steps == 0: + eval_metrics = run_evaluation() + + if local_state["step"] % training_args.save_steps == 0: + run_save_model(state, eval_metrics) + + # log final train metrics + if train_metrics is not None: + metrics_logger.update_state_metrics(state) + metrics_logger.log(train_metrics, prefix="train") + + epochs.write( + f"Epoch... ({epoch + 1}/{num_epochs} | Loss: {train_metrics['loss']}, Learning Rate: {train_metrics['learning_rate']})" + ) + + # Final evaluation + eval_metrics = run_evaluation() + + # save checkpoint after each epoch + run_save_model(state, eval_metrics) + + +if __name__ == "__main__": + main()